Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3 : : */
4 : :
5 : : #include <rte_flow.h>
6 : : #include <rte_flow_driver.h>
7 : : #include <rte_stdatomic.h>
8 : :
9 : : #include <mlx5_malloc.h>
10 : :
11 : : #include "mlx5.h"
12 : : #include "mlx5_defs.h"
13 : : #include "mlx5_flow.h"
14 : : #include "mlx5_rx.h"
15 : :
16 : : #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
17 : : #include "mlx5_hws_cnt.h"
18 : :
19 : : /** Fast path async flow API functions. */
20 : : static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops;
21 : :
22 : : /* The maximum number of actions supported in the flow. */
23 : : #define MLX5_HW_MAX_ACTS 16
24 : :
25 : : /*
26 : : * The default ipool size threshold used to decide which
27 : : * per_core_cache value to set.
28 : : */
29 : : #define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
30 : : /* The default min local cache size. */
31 : : #define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
32 : :
33 : : /* Default push burst threshold. */
34 : : #define BURST_THR 32u
35 : :
36 : : /* Default queue to flush the flows. */
37 : : #define MLX5_DEFAULT_FLUSH_QUEUE 0
38 : :
39 : : /* Maximum number of rules in control flow tables. */
40 : : #define MLX5_HW_CTRL_FLOW_NB_RULES (4096)
41 : :
42 : : /* Lowest flow group usable by an application if group translation is done. */
43 : : #define MLX5_HW_LOWEST_USABLE_GROUP (1)
44 : :
45 : : /* Maximum group index usable by user applications for transfer flows. */
46 : : #define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)
47 : :
48 : : /* Maximum group index usable by user applications for egress flows. */
49 : : #define MLX5_HW_MAX_EGRESS_GROUP (UINT32_MAX - 1)
50 : :
51 : : /* Lowest priority for HW root table. */
52 : : #define MLX5_HW_LOWEST_PRIO_ROOT 15
53 : :
54 : : /* Lowest priority for HW non-root table. */
55 : : #define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)
56 : :
57 : : /* Priorities for Rx control flow rules. */
58 : : #define MLX5_HW_CTRL_RX_PRIO_L2 (MLX5_HW_LOWEST_PRIO_ROOT)
59 : : #define MLX5_HW_CTRL_RX_PRIO_L3 (MLX5_HW_LOWEST_PRIO_ROOT - 1)
60 : : #define MLX5_HW_CTRL_RX_PRIO_L4 (MLX5_HW_LOWEST_PRIO_ROOT - 2)
61 : :
62 : : #define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
63 : : #define MLX5_HW_VLAN_PUSH_VID_IDX 1
64 : : #define MLX5_HW_VLAN_PUSH_PCP_IDX 2
65 : :
66 : : #define MLX5_MIRROR_MAX_CLONES_NUM 3
67 : : #define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
68 : :
69 : : #define MLX5_HW_PORT_IS_PROXY(priv) \
70 : : (!!((priv)->sh->esw_mode && (priv)->master))
71 : :
72 : :
73 : : struct mlx5_indlst_legacy {
74 : : struct mlx5_indirect_list indirect;
75 : : struct rte_flow_action_handle *handle;
76 : : enum rte_flow_action_type legacy_type;
77 : : };
78 : :
79 : : #define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
80 : : (((const struct encap_type *)(ptr))->definition)
81 : :
82 : : /**
83 : : * Returns the size of a struct with the following layout:
84 : : *
85 : : * @code{.c}
86 : : * struct rte_flow_hw {
87 : : * // rte_flow_hw fields
88 : : * uint8_t rule[mlx5dr_rule_get_handle_size()];
89 : : * };
90 : : * @endcode
91 : : *
92 : : * Such a struct is used as a basic container for a HW Steering flow rule.
93 : : */
94 : : static size_t
95 : : mlx5_flow_hw_entry_size(void)
96 : : {
97 : 0 : return sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
98 : : }
99 : :
100 : : /**
101 : : * Returns the size of "auxed" rte_flow_hw structure which is assumed to be laid out as follows:
102 : : *
103 : : * @code{.c}
104 : : * struct {
105 : : * struct rte_flow_hw {
106 : : * // rte_flow_hw fields
107 : : * uint8_t rule[mlx5dr_rule_get_handle_size()];
108 : : * } flow;
109 : : * struct rte_flow_hw_aux aux;
110 : : * };
111 : : * @endcode
112 : : *
113 : : * Such a struct is used whenever rte_flow_hw_aux cannot be allocated separately from the
114 : : * rte_flow_hw, e.g., when the table is resizable.
115 : : */
116 : : static size_t
117 : : mlx5_flow_hw_auxed_entry_size(void)
118 : : {
119 : 0 : size_t rule_size = mlx5dr_rule_get_handle_size();
120 : :
121 : 0 : return sizeof(struct rte_flow_hw) + rule_size + sizeof(struct rte_flow_hw_aux);
122 : : }
123 : :
124 : : /**
125 : : * Returns a valid pointer to the rte_flow_hw_aux associated with the given rte_flow_hw,
126 : : * depending on the template table configuration.
127 : : */
128 : : static __rte_always_inline struct rte_flow_hw_aux *
129 : : mlx5_flow_hw_aux(uint16_t port_id, struct rte_flow_hw *flow)
130 : : {
131 : 0 : struct rte_flow_template_table *table = flow->table;
132 : :
133 [ # # ]: 0 : if (rte_flow_template_table_resizable(port_id, &table->cfg.attr)) {
134 : 0 : size_t offset = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
135 : :
136 : 0 : return RTE_PTR_ADD(flow, offset);
137 : : } else {
138 : 0 : return &table->flow_aux[flow->idx - 1];
139 : : }
140 : : }
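
To make the aux placement above concrete, here is a standalone sketch of the resizable-table case, where the aux data trails the rule handle in the same allocation. The types and the 64-byte handle size are illustrative stand-ins for rte_flow_hw, rte_flow_hw_aux and mlx5dr_rule_get_handle_size(), not the driver's own definitions.

#include <stdint.h>
#include <stdlib.h>

struct entry_aux { uint32_t age_idx; uint32_t mtr_id; }; /* stand-in for rte_flow_hw_aux */
struct entry { uint32_t idx; };                          /* stand-in for rte_flow_hw */

static size_t rule_handle_size(void) { return 64; }      /* stand-in for mlx5dr_rule_get_handle_size() */

/* Same pointer arithmetic as mlx5_flow_hw_aux() uses for resizable tables. */
static struct entry_aux *entry_aux_get(struct entry *e)
{
	return (struct entry_aux *)((uint8_t *)e + sizeof(*e) + rule_handle_size());
}

int main(void)
{
	/* One allocation covers the entry, the rule handle and the trailing
	 * aux, mirroring mlx5_flow_hw_auxed_entry_size(). */
	struct entry *e = calloc(1, sizeof(*e) + rule_handle_size() + sizeof(struct entry_aux));

	if (e == NULL)
		return 1;
	entry_aux_get(e)->age_idx = 7;
	free(e);
	return 0;
}

For non-resizable tables no trailing space is reserved; the aux entry is taken from the per-table array instead, as in &table->flow_aux[flow->idx - 1] above.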
141 : :
142 : : static __rte_always_inline void
143 : : mlx5_flow_hw_aux_set_age_idx(struct rte_flow_hw *flow,
144 : : struct rte_flow_hw_aux *aux,
145 : : uint32_t age_idx)
146 : : {
147 : : /*
148 : : * The operation type is set explicitly only when creating a flow rule;
149 : : * otherwise, in the rule update case, the update-specific field is used.
150 : : */
151 [ # # ]: 0 : if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
152 : 0 : aux->upd.age_idx = age_idx;
153 : : else
154 : 0 : aux->orig.age_idx = age_idx;
155 : : }
156 : :
157 : : static __rte_always_inline uint32_t
158 : : mlx5_flow_hw_aux_get_age_idx(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
159 : : {
160 [ # # ]: 0 : if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
161 : 0 : return aux->upd.age_idx;
162 : : else
163 : 0 : return aux->orig.age_idx;
164 : : }
165 : :
166 : : static __rte_always_inline void
167 : : mlx5_flow_hw_aux_set_mtr_id(struct rte_flow_hw *flow,
168 : : struct rte_flow_hw_aux *aux,
169 : : uint32_t mtr_id)
170 : : {
171 [ # # ]: 0 : if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
172 : 0 : aux->upd.mtr_id = mtr_id;
173 : : else
174 : 0 : aux->orig.mtr_id = mtr_id;
175 : : }
176 : :
177 : : static __rte_always_inline uint32_t
178 : : mlx5_flow_hw_aux_get_mtr_id(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
179 : : {
180 [ # # ]: 0 : if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
181 : 0 : return aux->upd.mtr_id;
182 : : else
183 : 0 : return aux->orig.mtr_id;
184 : : }
185 : :
186 : : static __rte_always_inline struct mlx5_hw_q_job *
187 : : flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
188 : : const struct rte_flow_action_handle *handle,
189 : : void *user_data, void *query_data,
190 : : enum mlx5_hw_job_type type,
191 : : enum mlx5_hw_indirect_type indirect_type,
192 : : struct rte_flow_error *error);
193 : : static void
194 : : flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow,
195 : : struct rte_flow_error *error);
196 : :
197 : : static int
198 : : mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
199 : : struct rte_flow_template_table *tbl,
200 : : struct mlx5_multi_pattern_segment *segment,
201 : : uint32_t bulk_size,
202 : : struct rte_flow_error *error);
203 : : static void
204 : : mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment);
205 : :
206 : : static __rte_always_inline enum mlx5_indirect_list_type
207 : : flow_hw_inlist_type_get(const struct rte_flow_action *actions);
208 : :
209 : : static __rte_always_inline int
210 : : mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
211 : : {
212 : : switch (type) {
213 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
214 : : return 0;
215 : : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
216 : : return 1;
217 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
218 : : return 2;
219 : : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
220 : : return 3;
221 : : default:
222 : : break;
223 : : }
224 : : return -1;
225 : : }
226 : :
227 : : static __rte_always_inline enum mlx5dr_action_type
228 : : mlx5_multi_pattern_reformat_index_to_type(uint32_t ix)
229 : : {
230 : : switch (ix) {
231 : : case 0:
232 : : return MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
233 : : case 1:
234 : : return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
235 : : case 2:
236 : : return MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
237 : : case 3:
238 : : return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
239 : : default:
240 : : break;
241 : : }
242 : : return MLX5DR_ACTION_TYP_MAX;
243 : : }
244 : :
245 : : static inline enum mlx5dr_table_type
246 : : get_mlx5dr_table_type(const struct rte_flow_attr *attr)
247 : : {
248 : : enum mlx5dr_table_type type;
249 : :
250 [ # # # # ]: 0 : if (attr->transfer)
251 : : type = MLX5DR_TABLE_TYPE_FDB;
252 [ # # ]: 0 : else if (attr->egress)
253 : : type = MLX5DR_TABLE_TYPE_NIC_TX;
254 : : else
255 : : type = MLX5DR_TABLE_TYPE_NIC_RX;
256 : : return type;
257 : : }
258 : :
259 : : struct mlx5_mirror_clone {
260 : : enum rte_flow_action_type type;
261 : : void *action_ctx;
262 : : };
263 : :
264 : : struct mlx5_mirror {
265 : : struct mlx5_indirect_list indirect;
266 : : uint32_t clones_num;
267 : : struct mlx5dr_action *mirror_action;
268 : : struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
269 : : };
270 : :
271 : : static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
272 : : static int flow_hw_translate_group(struct rte_eth_dev *dev,
273 : : const struct mlx5_flow_template_table_cfg *cfg,
274 : : uint32_t group,
275 : : uint32_t *table_group,
276 : : struct rte_flow_error *error);
277 : : static __rte_always_inline int
278 : : flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
279 : : struct mlx5_modification_cmd *mhdr_cmd,
280 : : struct mlx5_action_construct_data *act_data,
281 : : const struct mlx5_hw_actions *hw_acts,
282 : : const struct rte_flow_action *action);
283 : : static void
284 : : flow_hw_construct_quota(struct mlx5_priv *priv,
285 : : struct mlx5dr_rule_action *rule_act, uint32_t qid);
286 : :
287 : : static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
288 : : static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
289 : :
290 : : const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
291 : :
292 : : /* DR action flags for different table types. */
293 : : static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
294 : : [MLX5DR_TABLE_TYPE_MAX] = {
295 : : {
296 : : MLX5DR_ACTION_FLAG_ROOT_RX,
297 : : MLX5DR_ACTION_FLAG_ROOT_TX,
298 : : MLX5DR_ACTION_FLAG_ROOT_FDB,
299 : : },
300 : : {
301 : : MLX5DR_ACTION_FLAG_HWS_RX,
302 : : MLX5DR_ACTION_FLAG_HWS_TX,
303 : : MLX5DR_ACTION_FLAG_HWS_FDB,
304 : : },
305 : : };
306 : :
307 : : /* Ethernet item spec for promiscuous mode. */
308 : : static const struct rte_flow_item_eth ctrl_rx_eth_promisc_spec = {
309 : : .hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
310 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
311 : : .hdr.ether_type = 0,
312 : : };
313 : : /* Ethernet item mask for promiscuous mode. */
314 : : static const struct rte_flow_item_eth ctrl_rx_eth_promisc_mask = {
315 : : .hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
316 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
317 : : .hdr.ether_type = 0,
318 : : };
319 : :
320 : : /* Ethernet item spec for all multicast mode. */
321 : : static const struct rte_flow_item_eth ctrl_rx_eth_mcast_spec = {
322 : : .hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
323 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
324 : : .hdr.ether_type = 0,
325 : : };
326 : : /* Ethernet item mask for all multicast mode. */
327 : : static const struct rte_flow_item_eth ctrl_rx_eth_mcast_mask = {
328 : : .hdr.dst_addr.addr_bytes = "\x01\x00\x00\x00\x00\x00",
329 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
330 : : .hdr.ether_type = 0,
331 : : };
332 : :
333 : : /* Ethernet item spec for IPv4 multicast traffic. */
334 : : static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_spec = {
335 : : .hdr.dst_addr.addr_bytes = "\x01\x00\x5e\x00\x00\x00",
336 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
337 : : .hdr.ether_type = 0,
338 : : };
339 : : /* Ethernet item mask for IPv4 multicast traffic. */
340 : : static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_mask = {
341 : : .hdr.dst_addr.addr_bytes = "\xff\xff\xff\x00\x00\x00",
342 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
343 : : .hdr.ether_type = 0,
344 : : };
345 : :
346 : : /* Ethernet item spec for IPv6 multicast traffic. */
347 : : static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_spec = {
348 : : .hdr.dst_addr.addr_bytes = "\x33\x33\x00\x00\x00\x00",
349 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
350 : : .hdr.ether_type = 0,
351 : : };
352 : : /* Ethernet item mask for IPv6 multicast traffic. */
353 : : static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_mask = {
354 : : .hdr.dst_addr.addr_bytes = "\xff\xff\x00\x00\x00\x00",
355 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
356 : : .hdr.ether_type = 0,
357 : : };
358 : :
359 : : /* Ethernet item mask for unicast traffic. */
360 : : static const struct rte_flow_item_eth ctrl_rx_eth_dmac_mask = {
361 : : .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
362 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
363 : : .hdr.ether_type = 0,
364 : : };
365 : :
366 : : /* Ethernet item spec for broadcast. */
367 : : static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
368 : : .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
369 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
370 : : .hdr.ether_type = 0,
371 : : };
372 : :
373 : : static inline uint32_t
374 : : flow_hw_q_pending(struct mlx5_priv *priv, uint32_t queue)
375 : : {
376 : 0 : struct mlx5_hw_q *q = &priv->hw_q[queue];
377 : :
378 : : MLX5_ASSERT(q->size >= q->job_idx);
379 : 0 : return (q->size - q->job_idx) + q->ongoing_flow_ops;
380 : : }
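
A worked example of the accounting above, with hypothetical values: if q->size == 64, q->job_idx == 60 and q->ongoing_flow_ops == 3, then flow_hw_q_pending() returns (64 - 60) + 3 = 7 operations that have been enqueued but not yet completed.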
381 : :
382 : : static inline void
383 : 0 : flow_hw_q_inc_flow_ops(struct mlx5_priv *priv, uint32_t queue)
384 : : {
385 : 0 : struct mlx5_hw_q *q = &priv->hw_q[queue];
386 : :
387 : 0 : q->ongoing_flow_ops++;
388 : 0 : }
389 : :
390 : : static inline void
391 : : flow_hw_q_dec_flow_ops(struct mlx5_priv *priv, uint32_t queue)
392 : : {
393 : 0 : struct mlx5_hw_q *q = &priv->hw_q[queue];
394 : :
395 : 0 : q->ongoing_flow_ops--;
396 : : }
397 : :
398 : : static inline enum mlx5dr_matcher_insert_mode
399 : : flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)
400 : : {
401 : 0 : if (insert_type == RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
402 : : return MLX5DR_MATCHER_INSERT_BY_HASH;
403 : : else
404 : 0 : return MLX5DR_MATCHER_INSERT_BY_INDEX;
405 : : }
406 : :
407 : : static inline enum mlx5dr_matcher_distribute_mode
408 : : flow_hw_matcher_distribute_mode_get(enum rte_flow_table_hash_func hash_func)
409 : : {
410 [ # # ]: 0 : if (hash_func == RTE_FLOW_TABLE_HASH_FUNC_LINEAR)
411 : : return MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR;
412 : : else
413 : 0 : return MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
414 : : }
415 : :
416 : : /**
417 : : * Set the hash fields according to the @p rss_desc information.
418 : : *
419 : : * @param[in] rss_desc
420 : : * Pointer to the mlx5_flow_rss_desc.
421 : : * @param[out] hash_fields
422 : : * Pointer to the RSS hash fields.
423 : : */
424 : : static void
425 : 0 : flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
426 : : uint64_t *hash_fields)
427 : : {
428 : : uint64_t fields = 0;
429 : : int rss_inner = 0;
430 [ # # ]: 0 : uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
431 : :
432 : : #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
433 [ # # ]: 0 : if (rss_desc->level >= 2)
434 : : rss_inner = 1;
435 : : #endif
436 [ # # ]: 0 : if (rss_types & MLX5_IPV4_LAYER_TYPES) {
437 [ # # ]: 0 : if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
438 : : fields |= IBV_RX_HASH_SRC_IPV4;
439 [ # # ]: 0 : else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
440 : : fields |= IBV_RX_HASH_DST_IPV4;
441 : : else
442 : : fields |= MLX5_IPV4_IBV_RX_HASH;
443 [ # # ]: 0 : } else if (rss_types & MLX5_IPV6_LAYER_TYPES) {
444 [ # # ]: 0 : if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
445 : : fields |= IBV_RX_HASH_SRC_IPV6;
446 [ # # ]: 0 : else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
447 : : fields |= IBV_RX_HASH_DST_IPV6;
448 : : else
449 : : fields |= MLX5_IPV6_IBV_RX_HASH;
450 : : }
451 [ # # ]: 0 : if (rss_types & RTE_ETH_RSS_UDP) {
452 [ # # ]: 0 : if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
453 : 0 : fields |= IBV_RX_HASH_SRC_PORT_UDP;
454 [ # # ]: 0 : else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
455 : 0 : fields |= IBV_RX_HASH_DST_PORT_UDP;
456 : : else
457 : 0 : fields |= MLX5_UDP_IBV_RX_HASH;
458 [ # # ]: 0 : } else if (rss_types & RTE_ETH_RSS_TCP) {
459 [ # # ]: 0 : if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
460 : 0 : fields |= IBV_RX_HASH_SRC_PORT_TCP;
461 [ # # ]: 0 : else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
462 : 0 : fields |= IBV_RX_HASH_DST_PORT_TCP;
463 : : else
464 : 0 : fields |= MLX5_TCP_IBV_RX_HASH;
465 : : }
466 [ # # ]: 0 : if (rss_types & RTE_ETH_RSS_ESP)
467 : 0 : fields |= IBV_RX_HASH_IPSEC_SPI;
468 [ # # ]: 0 : if (rss_inner)
469 : 0 : fields |= IBV_RX_HASH_INNER;
470 : 0 : *hash_fields = fields;
471 : 0 : }
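
A usage sketch of the mapping implemented above, assuming rte_eth_rss_hf_refine() keeps the requested bits:

	struct mlx5_flow_rss_desc desc = {
		.types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_UDP | RTE_ETH_RSS_L4_SRC_ONLY,
		.level = 1,
	};
	uint64_t fields;

	flow_hw_hashfields_set(&desc, &fields);
	/* fields == MLX5_IPV4_IBV_RX_HASH | IBV_RX_HASH_SRC_PORT_UDP:
	 * full IPv4 src/dst hashing, UDP source port only, and no
	 * IBV_RX_HASH_INNER bit since level < 2. */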
472 : :
473 : : /**
474 : : * Generate the matching pattern item flags.
475 : : *
476 : : * @param[in] items
477 : : * Pointer to the list of items.
478 : : *
479 : : * @return
480 : : * Matching item flags. The RSS hash field function
481 : : * silently ignores unsupported flags.
482 : : */
483 : : static uint64_t
484 : 0 : flow_hw_matching_item_flags_get(const struct rte_flow_item items[])
485 : : {
486 : : uint64_t item_flags = 0;
487 : : uint64_t last_item = 0;
488 : :
489 [ # # ]: 0 : for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
490 : 0 : int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
491 : : int item_type = items->type;
492 : :
493 [ # # ]: 0 : switch (item_type) {
494 : 0 : case RTE_FLOW_ITEM_TYPE_IPV4:
495 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
496 : : MLX5_FLOW_LAYER_OUTER_L3_IPV4;
497 : : break;
498 : 0 : case RTE_FLOW_ITEM_TYPE_IPV6:
499 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
500 : : MLX5_FLOW_LAYER_OUTER_L3_IPV6;
501 : : break;
502 : 0 : case RTE_FLOW_ITEM_TYPE_TCP:
503 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
504 : : MLX5_FLOW_LAYER_OUTER_L4_TCP;
505 : : break;
506 : 0 : case RTE_FLOW_ITEM_TYPE_UDP:
507 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
508 : : MLX5_FLOW_LAYER_OUTER_L4_UDP;
509 : : break;
510 : 0 : case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
511 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
512 : : MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
513 : : break;
514 : 0 : case RTE_FLOW_ITEM_TYPE_GRE:
515 : : last_item = MLX5_FLOW_LAYER_GRE;
516 : 0 : break;
517 : 0 : case RTE_FLOW_ITEM_TYPE_NVGRE:
518 : : last_item = MLX5_FLOW_LAYER_GRE;
519 : 0 : break;
520 : 0 : case RTE_FLOW_ITEM_TYPE_VXLAN:
521 : : last_item = MLX5_FLOW_LAYER_VXLAN;
522 : 0 : break;
523 : 0 : case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
524 : : last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
525 : 0 : break;
526 : 0 : case RTE_FLOW_ITEM_TYPE_GENEVE:
527 : : last_item = MLX5_FLOW_LAYER_GENEVE;
528 : 0 : break;
529 : 0 : case RTE_FLOW_ITEM_TYPE_MPLS:
530 : : last_item = MLX5_FLOW_LAYER_MPLS;
531 : 0 : break;
532 : 0 : case RTE_FLOW_ITEM_TYPE_GTP:
533 : : last_item = MLX5_FLOW_LAYER_GTP;
534 : 0 : break;
535 : 0 : case RTE_FLOW_ITEM_TYPE_COMPARE:
536 : : last_item = MLX5_FLOW_ITEM_COMPARE;
537 : 0 : break;
538 : : default:
539 : : break;
540 : : }
541 : 0 : item_flags |= last_item;
542 : : }
543 : 0 : return item_flags;
544 : : }
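
For example, a pattern of ETH / IPV4 / UDP / VXLAN / IPV4 accumulates MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_INNER_L3_IPV4: once the VXLAN item sets a bit covered by MLX5_FLOW_LAYER_TUNNEL, subsequent items are classified as inner layers.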
545 : :
546 : : /**
547 : : * Register destination table DR jump action.
548 : : *
549 : : * @param[in] dev
550 : : * Pointer to the rte_eth_dev structure.
551 : : * @param[in] cfg
552 : : * Pointer to the template table configuration.
553 : : * @param[in] dest_group
554 : : * The destination group ID.
555 : : * @param[out] error
556 : : * Pointer to error structure.
557 : : *
558 : : * @return
559 : : * Pointer to the jump action on success, NULL otherwise and rte_errno is set.
560 : : */
561 : : static struct mlx5_hw_jump_action *
562 : 0 : flow_hw_jump_action_register(struct rte_eth_dev *dev,
563 : : const struct mlx5_flow_template_table_cfg *cfg,
564 : : uint32_t dest_group,
565 : : struct rte_flow_error *error)
566 : : {
567 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
568 : 0 : struct rte_flow_attr jattr = cfg->attr.flow_attr;
569 : : struct mlx5_flow_group *grp;
570 : 0 : struct mlx5_flow_cb_ctx ctx = {
571 : : .dev = dev,
572 : : .error = error,
573 : : .data = &jattr,
574 : : };
575 : : struct mlx5_list_entry *ge;
576 : : uint32_t target_group;
577 : :
578 : 0 : target_group = dest_group;
579 [ # # ]: 0 : if (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))
580 : : return NULL;
581 : 0 : jattr.group = target_group;
582 : 0 : ge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);
583 [ # # ]: 0 : if (!ge)
584 : : return NULL;
585 : : grp = container_of(ge, struct mlx5_flow_group, entry);
586 : 0 : return &grp->jump;
587 : : }
588 : :
589 : : /**
590 : : * Release jump action.
591 : : *
592 : : * @param[in] dev
593 : : * Pointer to the rte_eth_dev structure.
594 : : * @param[in] jump
595 : : * Pointer to the jump action.
596 : : */
597 : :
598 : : static void
599 : : flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
600 : : {
601 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
602 : : struct mlx5_flow_group *grp;
603 : :
604 : 0 : grp = container_of
605 : : (jump, struct mlx5_flow_group, jump);
606 : 0 : mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
607 : 0 : }
608 : :
609 : : /**
610 : : * Register queue/RSS action.
611 : : *
612 : : * @param[in] dev
613 : : * Pointer to the rte_eth_dev structure.
614 : : * @param[in] hws_flags
615 : : * DR action flags.
616 : : * @param[in] action
617 : : * Pointer to the rte flow action.
618 : : *
619 : : * @return
620 : : * Hash Rx queue object on success, NULL otherwise and rte_errno is set.
621 : : */
622 : : static inline struct mlx5_hrxq*
623 : 0 : flow_hw_tir_action_register(struct rte_eth_dev *dev,
624 : : uint32_t hws_flags,
625 : : const struct rte_flow_action *action)
626 : : {
627 : 0 : struct mlx5_flow_rss_desc rss_desc = {
628 : : .hws_flags = hws_flags,
629 : : };
630 : : struct mlx5_hrxq *hrxq;
631 : :
632 [ # # ]: 0 : if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
633 : 0 : const struct rte_flow_action_queue *queue = action->conf;
634 : :
635 : 0 : rss_desc.const_q = &queue->index;
636 : 0 : rss_desc.queue_num = 1;
637 : : } else {
638 : 0 : const struct rte_flow_action_rss *rss = action->conf;
639 : :
640 : 0 : rss_desc.queue_num = rss->queue_num;
641 : 0 : rss_desc.const_q = rss->queue;
642 : 0 : memcpy(rss_desc.key,
643 [ # # ]: 0 : !rss->key ? rss_hash_default_key : rss->key,
644 : : MLX5_RSS_HASH_KEY_LEN);
645 : 0 : rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
646 [ # # ]: 0 : rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
647 : 0 : rss_desc.symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
648 : 0 : flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
649 : 0 : flow_dv_action_rss_l34_hash_adjust(rss->types,
650 : : &rss_desc.hash_fields);
651 [ # # ]: 0 : if (rss->level > 1) {
652 : 0 : rss_desc.hash_fields |= IBV_RX_HASH_INNER;
653 : 0 : rss_desc.tunnel = 1;
654 : : }
655 : : }
656 : 0 : hrxq = mlx5_hrxq_get(dev, &rss_desc);
657 : 0 : return hrxq;
658 : : }
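
A minimal caller-side sketch for the single-queue branch above (the queue index and flags are hypothetical):

	const struct rte_flow_action_queue queue_conf = { .index = 0 };
	const struct rte_flow_action act = {
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &queue_conf,
	};
	struct mlx5_hrxq *hrxq;

	/* Creates or reuses a hash Rx queue object for Rx queue 0. */
	hrxq = flow_hw_tir_action_register(dev, MLX5DR_ACTION_FLAG_HWS_RX, &act);
	if (hrxq == NULL)
		return -rte_errno; /* rte_errno is assumed to be set on failure */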
659 : :
660 : : static __rte_always_inline int
661 : : flow_hw_ct_compile(struct rte_eth_dev *dev,
662 : : uint32_t queue, uint32_t idx,
663 : : struct mlx5dr_rule_action *rule_act)
664 : : {
665 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
666 : : struct mlx5_aso_ct_action *ct;
667 : :
668 : 0 : ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
669 [ # # # # : 0 : if (!ct || (!priv->shared_host && mlx5_aso_ct_available(priv->sh, queue, ct)))
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # ]
670 : : return -1;
671 : 0 : rule_act->action = priv->hws_ctpool->dr_action;
672 : 0 : rule_act->aso_ct.offset = ct->offset;
673 : 0 : rule_act->aso_ct.direction = ct->is_original ?
674 : 0 : MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
675 : : MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
676 : : return 0;
677 : : }
678 : :
679 : : static void
680 : : flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
681 : : {
682 [ # # # # ]: 0 : if (encap_decap->action && !encap_decap->multi_pattern)
683 : 0 : mlx5dr_action_destroy(encap_decap->action);
684 : : }
685 : :
686 : : static void
687 : : flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
688 : : {
689 [ # # # # ]: 0 : if (mhdr->action && !mhdr->multi_pattern)
690 : 0 : mlx5dr_action_destroy(mhdr->action);
691 : : }
692 : :
693 : : /**
694 : : * Destroy DR actions created by action template.
695 : : *
696 : : * DR actions created during a table creation's action translation
697 : : * need to be destroyed when the table is destroyed.
698 : : *
699 : : * @param[in] dev
700 : : * Pointer to the rte_eth_dev structure.
701 : : * @param[in] acts
702 : : * Pointer to the template HW steering DR actions.
703 : : */
704 : : static void
705 : 0 : __flow_hw_action_template_destroy(struct rte_eth_dev *dev,
706 : : struct mlx5_hw_actions *acts)
707 : : {
708 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
709 : : struct mlx5_action_construct_data *data;
710 : :
711 [ # # ]: 0 : while (!LIST_EMPTY(&acts->act_list)) {
712 : : data = LIST_FIRST(&acts->act_list);
713 [ # # ]: 0 : LIST_REMOVE(data, next);
714 : 0 : mlx5_ipool_free(priv->acts_ipool, data->idx);
715 : : }
716 : :
717 [ # # ]: 0 : if (acts->mark)
718 [ # # ]: 0 : if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
719 : 0 : flow_hw_rxq_flag_set(dev, false);
720 : :
721 [ # # ]: 0 : if (acts->jump) {
722 : : struct mlx5_flow_group *grp;
723 : :
724 : 0 : grp = container_of
725 : : (acts->jump, struct mlx5_flow_group, jump);
726 : 0 : mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
727 : 0 : acts->jump = NULL;
728 : : }
729 [ # # ]: 0 : if (acts->tir) {
730 : 0 : mlx5_hrxq_release(dev, acts->tir->idx);
731 : 0 : acts->tir = NULL;
732 : : }
733 [ # # ]: 0 : if (acts->encap_decap) {
734 : : flow_hw_template_destroy_reformat_action(acts->encap_decap);
735 : 0 : mlx5_free(acts->encap_decap);
736 : 0 : acts->encap_decap = NULL;
737 : : }
738 [ # # ]: 0 : if (acts->push_remove) {
739 [ # # ]: 0 : if (acts->push_remove->action)
740 : 0 : mlx5dr_action_destroy(acts->push_remove->action);
741 : 0 : mlx5_free(acts->push_remove);
742 : 0 : acts->push_remove = NULL;
743 : : }
744 [ # # ]: 0 : if (acts->mhdr) {
745 : : flow_hw_template_destroy_mhdr_action(acts->mhdr);
746 : 0 : mlx5_free(acts->mhdr);
747 : 0 : acts->mhdr = NULL;
748 : : }
749 [ # # ]: 0 : if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
750 [ # # ]: 0 : mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
751 : 0 : acts->cnt_id = 0;
752 : : }
753 [ # # ]: 0 : if (acts->mtr_id) {
754 : 0 : mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
755 : 0 : acts->mtr_id = 0;
756 : : }
757 : 0 : }
758 : :
759 : : /**
760 : : * Allocate a dynamic action construct data entry.
761 : : *
762 : : * @param[in] priv
763 : : * Pointer to the port private data structure.
764 : : * @param[in] type
765 : : * Action type.
766 : : * @param[in] action_src
767 : : * Offset of source rte flow action.
768 : : * @param[in] action_dst
769 : : * Offset of destination DR action.
770 : : *
771 : : * @return
772 : : * Pointer to the allocated action construct data on success,
773 : : * NULL otherwise and rte_errno is set.
775 : : */
776 : : static __rte_always_inline struct mlx5_action_construct_data *
777 : : __flow_hw_act_data_alloc(struct mlx5_priv *priv,
778 : : enum rte_flow_action_type type,
779 : : uint16_t action_src,
780 : : uint16_t action_dst)
781 : : {
782 : : struct mlx5_action_construct_data *act_data;
783 : 0 : uint32_t idx = 0;
784 : :
785 : 0 : act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
786 [ # # ]: 0 : if (!act_data)
787 : : return NULL;
788 : 0 : act_data->idx = idx;
789 : 0 : act_data->type = type;
790 : 0 : act_data->action_src = action_src;
791 : 0 : act_data->action_dst = action_dst;
792 : : return act_data;
793 : : }
794 : :
795 : : /**
796 : : * Append dynamic action to the dynamic action list.
797 : : *
798 : : * @param[in] priv
799 : : * Pointer to the port private data structure.
800 : : * @param[in] acts
801 : : * Pointer to the template HW steering DR actions.
802 : : * @param[in] type
803 : : * Action type.
804 : : * @param[in] action_src
805 : : * Offset of source rte flow action.
806 : : * @param[in] action_dst
807 : : * Offset of destination DR action.
808 : : *
809 : : * @return
810 : : * 0 on success, negative value otherwise and rte_errno is set.
811 : : */
812 : : static __rte_always_inline int
813 : : __flow_hw_act_data_general_append(struct mlx5_priv *priv,
814 : : struct mlx5_hw_actions *acts,
815 : : enum rte_flow_action_type type,
816 : : uint16_t action_src,
817 : : uint16_t action_dst)
818 : : {
819 : : struct mlx5_action_construct_data *act_data;
820 : :
821 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
822 : : if (!act_data)
823 : : return -1;
824 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
825 : : return 0;
826 : : }
827 : :
828 : : static __rte_always_inline int
829 : : flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
830 : : struct mlx5_hw_actions *acts,
831 : : enum rte_flow_action_type type,
832 : : uint16_t action_src, uint16_t action_dst,
833 : : indirect_list_callback_t cb)
834 : : {
835 : : struct mlx5_action_construct_data *act_data;
836 : :
837 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
838 : : if (!act_data)
839 : 0 : return -1;
840 : 0 : act_data->indirect_list_cb = cb;
841 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
842 : 0 : return 0;
843 : : }
844 : : /**
845 : : * Append dynamic encap action to the dynamic action list.
846 : : *
847 : : * @param[in] priv
848 : : * Pointer to the port private data structure.
849 : : * @param[in] acts
850 : : * Pointer to the template HW steering DR actions.
851 : : * @param[in] type
852 : : * Action type.
853 : : * @param[in] action_src
854 : : * Offset of source rte flow action.
855 : : * @param[in] action_dst
856 : : * Offset of destination DR action.
857 : : * @param[in] len
858 : : * Length of the data to be updated.
859 : : *
860 : : * @return
861 : : * 0 on success, negative value otherwise and rte_errno is set.
862 : : */
863 : : static __rte_always_inline int
864 : : __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
865 : : struct mlx5_hw_actions *acts,
866 : : enum rte_flow_action_type type,
867 : : uint16_t action_src,
868 : : uint16_t action_dst,
869 : : uint16_t len)
870 : : {
871 : : struct mlx5_action_construct_data *act_data;
872 : :
873 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
874 : : if (!act_data)
875 : : return -1;
876 : 0 : act_data->encap.len = len;
877 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
878 : : return 0;
879 : : }
880 : :
881 : : /**
882 : : * Append dynamic push action to the dynamic action list.
883 : : *
884 : : * @param[in] dev
885 : : * Pointer to the port.
886 : : * @param[in] acts
887 : : * Pointer to the template HW steering DR actions.
888 : : * @param[in] type
889 : : * Action type.
890 : : * @param[in] action_src
891 : : * Offset of source rte flow action.
892 : : * @param[in] action_dst
893 : : * Offset of destination DR action.
894 : : * @param[in] len
895 : : * Length of the data to be updated.
896 : : *
897 : : * @return
898 : : * Data pointer on success, NULL otherwise and rte_errno is set.
899 : : */
900 : : static __rte_always_inline void *
901 : : __flow_hw_act_data_push_append(struct rte_eth_dev *dev,
902 : : struct mlx5_hw_actions *acts,
903 : : enum rte_flow_action_type type,
904 : : uint16_t action_src,
905 : : uint16_t action_dst,
906 : : uint16_t len)
907 : : {
908 : : struct mlx5_action_construct_data *act_data;
909 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
910 : :
911 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
912 : : if (!act_data)
913 : : return NULL;
914 : 0 : act_data->ipv6_ext.len = len;
915 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
916 : : return act_data;
917 : : }
918 : :
919 : : static __rte_always_inline int
920 : : __flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
921 : : struct mlx5_hw_actions *acts,
922 : : enum rte_flow_action_type type,
923 : : uint16_t action_src,
924 : : uint16_t action_dst,
925 : : uint16_t mhdr_cmds_off,
926 : : uint16_t mhdr_cmds_end,
927 : : bool shared,
928 : : struct field_modify_info *field,
929 : : struct field_modify_info *dcopy,
930 : : uint32_t *mask)
931 : : {
932 : : struct mlx5_action_construct_data *act_data;
933 : :
934 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
935 : : if (!act_data)
936 : : return -1;
937 : 0 : act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
938 : 0 : act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
939 : 0 : act_data->modify_header.shared = shared;
940 [ # # ]: 0 : rte_memcpy(act_data->modify_header.field, field,
941 : : sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
942 [ # # ]: 0 : rte_memcpy(act_data->modify_header.dcopy, dcopy,
943 : : sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
944 [ # # ]: 0 : rte_memcpy(act_data->modify_header.mask, mask,
945 : : sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
946 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
947 : : return 0;
948 : : }
949 : :
950 : : /**
951 : : * Append shared RSS action to the dynamic action list.
952 : : *
953 : : * @param[in] priv
954 : : * Pointer to the port private data structure.
955 : : * @param[in] acts
956 : : * Pointer to the template HW steering DR actions.
957 : : * @param[in] type
958 : : * Action type.
959 : : * @param[in] action_src
960 : : * Offset of source rte flow action.
961 : : * @param[in] action_dst
962 : : * Offset of destination DR action.
963 : : * @param[in] idx
964 : : * Shared RSS index.
965 : : * @param[in] rss
966 : : * Pointer to the shared RSS info.
967 : : *
968 : : * @return
969 : : * 0 on success, negative value otherwise and rte_errno is set.
970 : : */
971 : : static __rte_always_inline int
972 : : __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
973 : : struct mlx5_hw_actions *acts,
974 : : enum rte_flow_action_type type,
975 : : uint16_t action_src,
976 : : uint16_t action_dst,
977 : : uint32_t idx,
978 : : struct mlx5_shared_action_rss *rss)
979 : : {
980 : : struct mlx5_action_construct_data *act_data;
981 : :
982 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
983 : : if (!act_data)
984 : : return -1;
985 : 0 : act_data->shared_rss.level = rss->origin.level;
986 [ # # ]: 0 : act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
987 : : rss->origin.types;
988 : 0 : act_data->shared_rss.idx = idx;
989 : 0 : act_data->shared_rss.symmetric_hash_function =
990 : 0 : MLX5_RSS_IS_SYMM(rss->origin.func);
991 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
992 : : return 0;
993 : : }
994 : :
995 : : /**
996 : : * Append shared counter action to the dynamic action list.
997 : : *
998 : : * @param[in] priv
999 : : * Pointer to the port private data structure.
1000 : : * @param[in] acts
1001 : : * Pointer to the template HW steering DR actions.
1002 : : * @param[in] type
1003 : : * Action type.
1004 : : * @param[in] action_src
1005 : : * Offset of source rte flow action.
1006 : : * @param[in] action_dst
1007 : : * Offset of destination DR action.
1008 : : * @param[in] cnt_id
1009 : : * Shared counter id.
1010 : : *
1011 : : * @return
1012 : : * 0 on success, negative value otherwise and rte_errno is set.
1013 : : */
1014 : : static __rte_always_inline int
1015 : : __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
1016 : : struct mlx5_hw_actions *acts,
1017 : : enum rte_flow_action_type type,
1018 : : uint16_t action_src,
1019 : : uint16_t action_dst,
1020 : : cnt_id_t cnt_id)
1021 : : {
1022 : : struct mlx5_action_construct_data *act_data;
1023 : :
1024 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1025 : : if (!act_data)
1026 : : return -1;
1027 : : act_data->type = type;
1028 : 0 : act_data->shared_counter.id = cnt_id;
1029 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1030 : : return 0;
1031 : : }
1032 : :
1033 : : /**
1034 : : * Append shared meter_mark action to the dynamic action list.
1035 : : *
1036 : : * @param[in] priv
1037 : : * Pointer to the port private data structure.
1038 : : * @param[in] acts
1039 : : * Pointer to the template HW steering DR actions.
1040 : : * @param[in] type
1041 : : * Action type.
1042 : : * @param[in] action_src
1043 : : * Offset of source rte flow action.
1044 : : * @param[in] action_dst
1045 : : * Offset of destination DR action.
1046 : : * @param[in] mtr_id
1047 : : * Shared meter id.
1048 : : *
1049 : : * @return
1050 : : * 0 on success, negative value otherwise and rte_errno is set.
1051 : : */
1052 : : static __rte_always_inline int
1053 : : __flow_hw_act_data_shared_mtr_append(struct mlx5_priv *priv,
1054 : : struct mlx5_hw_actions *acts,
1055 : : enum rte_flow_action_type type,
1056 : : uint16_t action_src,
1057 : : uint16_t action_dst,
1058 : : cnt_id_t mtr_id)
1059 : : {
     : : 	struct mlx5_action_construct_data *act_data;
1060 : :
1061 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1062 : : if (!act_data)
1063 : : return -1;
1064 : : act_data->type = type;
1065 : 0 : act_data->shared_meter.id = mtr_id;
1066 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1067 : : return 0;
1068 : : }
1069 : :
1070 : : /**
1071 : : * Translate shared indirect action.
1072 : : *
1073 : : * @param[in] dev
1074 : : * Pointer to the rte_eth_dev data structure.
1075 : : * @param[in] action
1076 : : * Pointer to the shared indirect rte_flow action.
1077 : : * @param[in] acts
1078 : : * Pointer to the template HW steering DR actions.
1079 : : * @param[in] action_src
1080 : : * Offset of source rte flow action.
1081 : : * @param[in] action_dst
1082 : : * Offset of destination DR action.
1083 : : *
1084 : : * @return
1085 : : * 0 on success, negative value otherwise and rte_errno is set.
1086 : : */
1087 : : static __rte_always_inline int
1088 : : flow_hw_shared_action_translate(struct rte_eth_dev *dev,
1089 : : const struct rte_flow_action *action,
1090 : : struct mlx5_hw_actions *acts,
1091 : : uint16_t action_src,
1092 : : uint16_t action_dst)
1093 : : {
1094 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1095 : : struct mlx5_shared_action_rss *shared_rss;
1096 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
1097 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1098 : 0 : uint32_t idx = act_idx &
1099 : : ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1100 : :
1101 : 0 : switch (type) {
1102 : 0 : case MLX5_INDIRECT_ACTION_TYPE_RSS:
1103 : 0 : shared_rss = mlx5_ipool_get
1104 : 0 : (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
1105 [ # # ]: 0 : if (!shared_rss || __flow_hw_act_data_shared_rss_append
1106 : : (priv, acts,
1107 : : (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
1108 : : action_src, action_dst, idx, shared_rss))
1109 : : return -1;
1110 : : break;
1111 : 0 : case MLX5_INDIRECT_ACTION_TYPE_COUNT:
1112 : : if (__flow_hw_act_data_shared_cnt_append(priv, acts,
1113 : : (enum rte_flow_action_type)
1114 : : MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
1115 : : action_src, action_dst, act_idx))
1116 : : return -1;
1117 : : break;
1118 : : case MLX5_INDIRECT_ACTION_TYPE_AGE:
1119 : : /* Not supported; prevented by the validate function. */
1120 : : MLX5_ASSERT(0);
1121 : : break;
1122 : 0 : case MLX5_INDIRECT_ACTION_TYPE_CT:
1123 : : if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
1124 : : idx, &acts->rule_acts[action_dst]))
1125 : : return -1;
1126 : : break;
1127 : 0 : case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1128 : : if (__flow_hw_act_data_shared_mtr_append(priv, acts,
1129 : : (enum rte_flow_action_type)
1130 : : MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
1131 : : action_src, action_dst, idx))
1132 : : return -1;
1133 : : break;
1134 : 0 : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
1135 : : flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
1136 : : break;
1137 : 0 : default:
1138 : 0 : DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
1139 : : break;
1140 : : }
1141 : : return 0;
1142 : : }
1143 : :
1144 : : static __rte_always_inline bool
1145 : : flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
1146 : : const struct rte_flow_action *mask)
1147 : : {
1148 : : const struct rte_flow_action_modify_field *v = action->conf;
1149 : 0 : const struct rte_flow_action_modify_field *m = mask->conf;
1150 : :
1151 : 0 : if (v->src.field == RTE_FLOW_FIELD_VALUE) {
1152 : : uint32_t j;
1153 : :
1154 [ # # ]: 0 : for (j = 0; j < RTE_DIM(m->src.value); ++j) {
1155 : : /*
1156 : : * Immediate value is considered to be masked
1157 : : * (and thus shared by all flow rules), if mask
1158 : : * is non-zero. Partial mask over immediate value
1159 : : * is not allowed.
1160 : : */
1161 [ # # ]: 0 : if (m->src.value[j])
1162 : : return true;
1163 : : }
1164 : : return false;
1165 : : }
1166 [ # # ]: 0 : if (v->src.field == RTE_FLOW_FIELD_POINTER)
1167 : 0 : return m->src.pvalue != NULL;
1168 : : /*
1169 : : * Source field types other than VALUE and
1170 : : * POINTER are always shared.
1171 : : */
1172 : : return true;
1173 : : }
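
To illustrate the decision above, a sketch of an action/mask pair that is treated as shared because the immediate value is fully masked (field choice and values are hypothetical):

	const struct rte_flow_action_modify_field conf = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
		.src = { .field = RTE_FLOW_FIELD_VALUE, .value = { 64 } },
		.width = 8,
	};
	const struct rte_flow_action_modify_field mask = {
		.dst = { .field = RTE_FLOW_FIELD_IPV4_TTL },
		.src = { .field = RTE_FLOW_FIELD_VALUE, .value = { 0xff } },
		.width = 8,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &conf,
	};
	const struct rte_flow_action action_mask = {
		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &mask,
	};

	/* true here; an all-zero mask.src.value would yield false,
	 * i.e. a per-rule immediate value. */
	bool shared = flow_hw_action_modify_field_is_shared(&action, &action_mask);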
1174 : :
1175 : : static __rte_always_inline bool
1176 : : flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
1177 : : const struct mlx5_modification_cmd *cmd)
1178 : : {
1179 : : struct mlx5_modification_cmd last_cmd = { { 0 } };
1180 : : struct mlx5_modification_cmd new_cmd = { { 0 } };
1181 : 0 : const uint32_t cmds_num = mhdr->mhdr_cmds_num;
1182 : : unsigned int last_type;
1183 : : bool should_insert = false;
1184 : :
1185 [ # # # # ]: 0 : if (cmds_num == 0)
1186 : : return false;
1187 : 0 : last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
1188 [ # # # # ]: 0 : last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
1189 [ # # # # ]: 0 : last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
1190 : 0 : last_type = last_cmd.action_type;
1191 : 0 : new_cmd = *cmd;
1192 [ # # # # ]: 0 : new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
1193 [ # # # # ]: 0 : new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
1194 [ # # ]: 0 : switch (new_cmd.action_type) {
1195 : 0 : case MLX5_MODIFICATION_TYPE_SET:
1196 : : case MLX5_MODIFICATION_TYPE_ADD:
1197 [ # # # # ]: 0 : if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1198 : : last_type == MLX5_MODIFICATION_TYPE_ADD)
1199 : 0 : should_insert = new_cmd.field == last_cmd.field;
1200 : 0 : else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1201 [ # # # # ]: 0 : last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1202 : 0 : should_insert = new_cmd.field == last_cmd.dst_field;
1203 : : else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1204 : : should_insert = false;
1205 : : else
1206 : : MLX5_ASSERT(false); /* Other types are not supported. */
1207 : : break;
1208 : 0 : case MLX5_MODIFICATION_TYPE_COPY:
1209 : : case MLX5_MODIFICATION_TYPE_ADD_FIELD:
1210 [ # # # # ]: 0 : if (last_type == MLX5_MODIFICATION_TYPE_SET ||
1211 : : last_type == MLX5_MODIFICATION_TYPE_ADD)
1212 [ # # # # ]: 0 : should_insert = (new_cmd.field == last_cmd.field ||
1213 [ # # # # ]: 0 : new_cmd.dst_field == last_cmd.field);
1214 : 0 : else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
1215 [ # # # # ]: 0 : last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
1216 [ # # # # ]: 0 : should_insert = (new_cmd.field == last_cmd.dst_field ||
1217 [ # # # # ]: 0 : new_cmd.dst_field == last_cmd.dst_field);
1218 : : else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
1219 : : should_insert = false;
1220 : : else
1221 : : MLX5_ASSERT(false); /* Other types are not supported. */
1222 : : break;
1223 : : default:
1224 : : /* Other action types should be rejected on AT validation. */
1225 : : MLX5_ASSERT(false);
1226 : : break;
1227 : : }
1228 : : return should_insert;
1229 : : }
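
As a concrete case of the collision rule above: two consecutive SET commands writing the same destination field, e.g. two MODIFY_FIELD actions that both set MLX5_MODI_OUT_IPV4_TTL, make flow_hw_should_insert_nop() return true, so the caller emits SET(TTL), NOP, SET(TTL) instead of two adjacent writes; a SET followed by a SET on a different field needs no NOP.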
1230 : :
1231 : : static __rte_always_inline int
1232 : : flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
1233 : : {
1234 : : struct mlx5_modification_cmd *nop;
1235 : : uint32_t num = mhdr->mhdr_cmds_num;
1236 : :
1237 [ # # # # ]: 0 : if (num + 1 >= MLX5_MHDR_MAX_CMD)
1238 : : return -ENOMEM;
1239 : 0 : nop = mhdr->mhdr_cmds + num;
1240 : : nop->data0 = 0;
1241 : : nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
1242 : 0 : nop->data0 = rte_cpu_to_be_32(nop->data0);
1243 : 0 : nop->data1 = 0;
1244 : 0 : mhdr->mhdr_cmds_num = num + 1;
1245 : : return 0;
1246 : : }
1247 : :
1248 : : static __rte_always_inline int
1249 : : flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
1250 : : struct mlx5_modification_cmd *cmd)
1251 : : {
1252 : 0 : uint32_t num = mhdr->mhdr_cmds_num;
1253 : :
1254 [ # # ]: 0 : if (num + 1 >= MLX5_MHDR_MAX_CMD)
1255 : : return -ENOMEM;
1256 : 0 : mhdr->mhdr_cmds[num] = *cmd;
1257 : 0 : mhdr->mhdr_cmds_num = num + 1;
1258 : : return 0;
1259 : : }
1260 : :
1261 : : static __rte_always_inline int
1262 : : flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
1263 : : struct mlx5_flow_dv_modify_hdr_resource *resource)
1264 : : {
1265 : : uint32_t idx;
1266 : : int ret;
1267 : :
1268 [ # # ]: 0 : for (idx = 0; idx < resource->actions_num; ++idx) {
1269 : : struct mlx5_modification_cmd *src = &resource->actions[idx];
1270 : :
1271 [ # # ]: 0 : if (flow_hw_should_insert_nop(mhdr, src)) {
1272 : : ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1273 : : if (ret)
1274 : : return ret;
1275 : : }
1276 : : ret = flow_hw_mhdr_cmd_append(mhdr, src);
1277 : : if (ret)
1278 : : return ret;
1279 : : }
1280 : : return 0;
1281 : : }
1282 : :
1283 : : static __rte_always_inline void
1284 : : flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
1285 : : struct rte_flow_actions_template *at)
1286 : : {
1287 : : memset(mhdr, 0, sizeof(*mhdr));
1288 : : /* Modify header action without any commands is shared by default. */
1289 : 0 : mhdr->shared = true;
1290 : 0 : mhdr->pos = at->mhdr_off;
1291 : : }
1292 : :
1293 : : static __rte_always_inline int
1294 : : flow_hw_modify_field_compile(struct rte_eth_dev *dev,
1295 : : const struct rte_flow_attr *attr,
1296 : : const struct rte_flow_action *action, /* Current action from AT. */
1297 : : const struct rte_flow_action *action_mask, /* Current mask from AT. */
1298 : : struct mlx5_hw_actions *acts,
1299 : : struct mlx5_hw_modify_header_action *mhdr,
1300 : : uint16_t src_pos,
1301 : : struct rte_flow_error *error)
1302 : : {
1303 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1304 : 0 : const struct rte_flow_action_modify_field *conf = action->conf;
1305 : : union {
1306 : : struct mlx5_flow_dv_modify_hdr_resource resource;
1307 : : uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
1308 : : sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
1309 : : } dummy;
1310 : : struct mlx5_flow_dv_modify_hdr_resource *resource;
1311 : 0 : struct rte_flow_item item = {
1312 : : .spec = NULL,
1313 : : .mask = NULL
1314 : : };
1315 : 0 : struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1316 : : {0, 0, MLX5_MODI_OUT_NONE} };
1317 : 0 : struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1318 : : {0, 0, MLX5_MODI_OUT_NONE} };
1319 : 0 : uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
1320 : 0 : uint32_t type, value = 0;
1321 : : uint16_t cmds_start, cmds_end;
1322 : : bool shared;
1323 : : int ret;
1324 : :
1325 : : /*
1326 : : * The modify header action is shared if all previous modify_field
1327 : : * actions are shared and the currently compiled action is shared.
1328 : : */
1329 : : shared = flow_hw_action_modify_field_is_shared(action, action_mask);
1330 : 0 : mhdr->shared &= shared;
1331 [ # # ]: 0 : if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1332 : : conf->src.field == RTE_FLOW_FIELD_VALUE) {
1333 [ # # ]: 0 : type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
1334 : : MLX5_MODIFICATION_TYPE_ADD;
1335 : : /* For SET/ADD fill the destination field (field) first. */
1336 : 0 : mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1337 : 0 : conf->width, dev,
1338 : : attr, error);
1339 : 0 : item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1340 [ # # ]: 0 : (void *)(uintptr_t)conf->src.pvalue :
1341 : : (void *)(uintptr_t)&conf->src.value;
1342 [ # # ]: 0 : if (conf->dst.field == RTE_FLOW_FIELD_META ||
1343 [ # # ]: 0 : conf->dst.field == RTE_FLOW_FIELD_TAG ||
1344 [ # # ]: 0 : conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
1345 : 0 : conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
1346 : : uint8_t tag_index = flow_tag_index_get(&conf->dst);
1347 : :
1348 : 0 : value = *(const unaligned_uint32_t *)item.spec;
1349 [ # # # # ]: 0 : if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
1350 : : tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1351 [ # # ]: 0 : value = rte_cpu_to_be_32(value << 16);
1352 : : else
1353 [ # # ]: 0 : value = rte_cpu_to_be_32(value);
1354 : 0 : item.spec = &value;
1355 [ # # ]: 0 : } else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
1356 : : conf->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
1357 : : /*
1358 : : * Both QFI and the Geneve option type are passed as a uint8_t integer,
1359 : : * but they are accessed through the 2nd least significant byte of a
1360 : : * 32-bit field in the modify header command.
1361 : : */
1362 : 0 : value = *(const uint8_t *)item.spec;
1363 [ # # ]: 0 : value = rte_cpu_to_be_32(value << 8);
1364 : 0 : item.spec = &value;
1365 : : }
1366 : : } else {
1367 : 0 : type = conf->operation == RTE_FLOW_MODIFY_SET ?
1368 [ # # ]: 0 : MLX5_MODIFICATION_TYPE_COPY : MLX5_MODIFICATION_TYPE_ADD_FIELD;
1369 : : /* For COPY fill the destination field (dcopy) without mask. */
1370 : 0 : mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1371 : 0 : conf->width, dev,
1372 : : attr, error);
1373 : : /* Then construct the source field (field) with mask. */
1374 : 0 : mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1375 : 0 : conf->width, dev,
1376 : : attr, error);
1377 : : }
1378 : 0 : item.mask = &mask;
1379 : : memset(&dummy, 0, sizeof(dummy));
1380 : : resource = &dummy.resource;
1381 : 0 : ret = flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
1382 [ # # ]: 0 : if (ret)
1383 : : return ret;
1384 : : MLX5_ASSERT(resource->actions_num > 0);
1385 : : /*
1386 : : * If the previous modify field action collides with this one, then insert a NOP command.
1387 : : * This NOP command will not be a part of the action's command range used to update
1388 : : * commands on rule creation.
1389 : : */
1390 [ # # ]: 0 : if (flow_hw_should_insert_nop(mhdr, &resource->actions[0])) {
1391 : : ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1392 : : if (ret)
1393 : 0 : return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1394 : : NULL, "too many modify field operations specified");
1395 : : }
1396 : 0 : cmds_start = mhdr->mhdr_cmds_num;
1397 : : ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource);
1398 [ # # ]: 0 : if (ret)
1399 : 0 : return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1400 : : NULL, "too many modify field operations specified");
1401 : :
1402 : 0 : cmds_end = mhdr->mhdr_cmds_num;
1403 [ # # ]: 0 : if (shared)
1404 : : return 0;
1405 : : ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
1406 : 0 : src_pos, mhdr->pos,
1407 : : cmds_start, cmds_end, shared,
1408 : : field, dcopy, mask);
1409 : : if (ret)
1410 : 0 : return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1411 : : NULL, "not enough memory to store modify field metadata");
1412 : : return 0;
1413 : : }
1414 : :
1415 : : static uint32_t
1416 : 0 : flow_hw_count_nop_modify_field(struct mlx5_hw_modify_header_action *mhdr)
1417 : : {
1418 : : uint32_t i;
1419 : : uint32_t nops = 0;
1420 : :
1421 [ # # ]: 0 : for (i = 0; i < mhdr->mhdr_cmds_num; ++i) {
1422 : 0 : struct mlx5_modification_cmd cmd = mhdr->mhdr_cmds[i];
1423 : :
1424 [ # # ]: 0 : cmd.data0 = rte_be_to_cpu_32(cmd.data0);
1425 [ # # ]: 0 : if (cmd.action_type == MLX5_MODIFICATION_TYPE_NOP)
1426 : 0 : ++nops;
1427 : : }
1428 : 0 : return nops;
1429 : : }
1430 : :
1431 : : static int
1432 : 0 : flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
1433 : : const struct mlx5_flow_template_table_cfg *cfg,
1434 : : struct mlx5_hw_modify_header_action *mhdr,
1435 : : struct rte_flow_error *error)
1436 : : {
1437 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1438 : 0 : struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
1439 : :
1440 : : /*
1441 : : * Header modify pattern length limitation is only valid for HWS groups, i.e. groups > 0.
1442 : : * In group 0, MODIFY_FIELD actions are handled with header modify actions
1443 : : * managed by rdma-core.
1444 : : */
1445 [ # # ]: 0 : if (cfg->attr.flow_attr.group != 0 &&
1446 [ # # ]: 0 : mhdr->mhdr_cmds_num > hca_attr->max_header_modify_pattern_length) {
1447 : 0 : uint32_t nops = flow_hw_count_nop_modify_field(mhdr);
1448 : :
1449 : 0 : DRV_LOG(ERR, "Too many modify header commands generated from "
1450 : : "MODIFY_FIELD actions. "
1451 : : "Generated HW commands = %u (amount of NOP commands = %u). "
1452 : : "Maximum supported = %u.",
1453 : : mhdr->mhdr_cmds_num, nops,
1454 : : hca_attr->max_header_modify_pattern_length);
1455 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1456 : : "Number of MODIFY_FIELD actions exceeds maximum "
1457 : : "supported limit of actions");
1458 : : }
1459 : : return 0;
1460 : : }
1461 : :
1462 : : static int
1463 : 0 : flow_hw_represented_port_compile(struct rte_eth_dev *dev,
1464 : : const struct rte_flow_attr *attr,
1465 : : const struct rte_flow_action *action,
1466 : : const struct rte_flow_action *action_mask,
1467 : : struct mlx5_hw_actions *acts,
1468 : : uint16_t action_src, uint16_t action_dst,
1469 : : struct rte_flow_error *error)
1470 : : {
1471 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1472 : 0 : const struct rte_flow_action_ethdev *v = action->conf;
1473 : 0 : const struct rte_flow_action_ethdev *m = action_mask->conf;
1474 : : int ret;
1475 : :
1476 [ # # ]: 0 : if (!attr->group)
1477 : 0 : return rte_flow_error_set(error, EINVAL,
1478 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1479 : : "represented_port action cannot"
1480 : : " be used on group 0");
1481 [ # # ]: 0 : if (!attr->transfer)
1482 : 0 : return rte_flow_error_set(error, EINVAL,
1483 : : RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1484 : : NULL,
1485 : : "represented_port action requires"
1486 : : " transfer attribute");
1487 [ # # ]: 0 : if (attr->ingress || attr->egress)
1488 : 0 : return rte_flow_error_set(error, EINVAL,
1489 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1490 : : "represented_port action cannot"
1491 : : " be used with direction attributes");
1492 [ # # ]: 0 : if (!priv->master)
1493 : 0 : return rte_flow_error_set(error, EINVAL,
1494 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1495 : : "represented_port action must"
1496 : : " be used on proxy port");
1497 [ # # # # ]: 0 : if (m && !!m->port_id) {
1498 : : struct mlx5_priv *port_priv;
1499 : :
1500 [ # # ]: 0 : if (!v)
1501 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1502 : : action, "port index was not provided");
1503 : 0 : port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
1504 [ # # ]: 0 : if (port_priv == NULL)
1505 : 0 : return rte_flow_error_set
1506 : : (error, EINVAL,
1507 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1508 : : "port does not exist or unable to"
1509 : : " obtain E-Switch info for port");
1510 : : MLX5_ASSERT(priv->hw_vport != NULL);
1511 [ # # ]: 0 : if (priv->hw_vport[v->port_id]) {
1512 : 0 : acts->rule_acts[action_dst].action =
1513 : : priv->hw_vport[v->port_id];
1514 : : } else {
1515 : 0 : return rte_flow_error_set
1516 : : (error, EINVAL,
1517 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1518 : : "cannot use represented_port action"
1519 : : " with this port");
1520 : : }
1521 : : } else {
1522 : : ret = __flow_hw_act_data_general_append
1523 : 0 : (priv, acts, action->type,
1524 : : action_src, action_dst);
1525 : : if (ret)
1526 : 0 : return rte_flow_error_set
1527 : : (error, ENOMEM,
1528 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1529 : : "not enough memory to store"
1530 : : " vport action");
1531 : : }
1532 : : return 0;
1533 : : }
1534 : :
1535 : : static __rte_always_inline int
1536 : : flow_hw_meter_compile(struct rte_eth_dev *dev,
1537 : : const struct mlx5_flow_template_table_cfg *cfg,
1538 : : uint16_t aso_mtr_pos,
1539 : : uint16_t jump_pos,
1540 : : const struct rte_flow_action *action,
1541 : : struct mlx5_hw_actions *acts,
1542 : : struct rte_flow_error *error)
1543 : : {
1544 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1545 : : struct mlx5_aso_mtr *aso_mtr;
1546 : : const struct rte_flow_action_meter *meter = action->conf;
1547 : 0 : uint32_t group = cfg->attr.flow_attr.group;
1548 : :
1549 : 0 : aso_mtr = mlx5_aso_meter_by_idx(priv, meter->mtr_id);
1550 : 0 : acts->rule_acts[aso_mtr_pos].action = priv->mtr_bulk.action;
1551 : 0 : acts->rule_acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1552 : 0 : acts->jump = flow_hw_jump_action_register
1553 : 0 : (dev, cfg, aso_mtr->fm.group, error);
1554 [ # # ]: 0 : if (!acts->jump)
1555 : : return -ENOMEM;
1556 : 0 : acts->rule_acts[jump_pos].action = (!!group) ?
1557 [ # # ]: 0 : acts->jump->hws_action :
1558 : : acts->jump->root_action;
1559 [ # # ]: 0 : if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
1560 : : return -ENOMEM;
1561 : : return 0;
1562 : : }
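 : :
 : : /*
 : :  * Illustration (not part of the driver): a masked METER action occupies
 : :  * two consecutive rule action slots, e.g. with aso_mtr_pos == 5:
 : :  *
 : :  *   rule_acts[5] -> ASO_METER (bulk meter action + object offset)
 : :  *   rule_acts[6] -> FT, the jump to the meter policy group
 : :  */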
1563 : :
1564 : : static __rte_always_inline int
1565 : : flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t start_pos,
1566 : : struct mlx5_hw_actions *acts)
1567 : : {
1568 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1569 : : uint32_t pos = start_pos;
1570 : : cnt_id_t cnt_id;
1571 : : int ret;
1572 : :
1573 : 0 : ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
1574 : : if (ret != 0)
1575 : : return ret;
1576 : 0 : ret = mlx5_hws_cnt_pool_get_action_offset
1577 : : (priv->hws_cpool,
1578 : : cnt_id,
1579 : : &acts->rule_acts[pos].action,
1580 : : &acts->rule_acts[pos].counter.offset);
1581 : : if (ret != 0)
1582 : : return ret;
1583 : 0 : acts->cnt_id = cnt_id;
1584 : : return 0;
1585 : : }
1586 : :
1587 : : static __rte_always_inline bool
1588 : : is_of_vlan_pcp_present(const struct rte_flow_action *actions)
1589 : : {
1590 : : /*
1591 : : * Order of RTE VLAN push actions is
1592 : : * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
1593 : : */
1594 : 0 : return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
1595 : : RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
1596 : : }
1597 : :
1598 : : static __rte_always_inline bool
1599 : : is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
1600 : : {
1601 : : /*
1602 : : * In masked push VLAN template all RTE push actions are masked.
1603 : : */
1604 [ # # ]: 0 : return mask && mask->ethertype != 0;
1605 : : }
1606 : :
1607 : 0 : static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
1608 : : {
1609 : : /*
1610 : : * OpenFlow Switch Specification defines 801.1q VID as 12+1 bits.
1611 : : */
1612 : : rte_be32_t type, vid, pcp;
1613 : : #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1614 : : rte_be32_t vid_lo, vid_hi;
1615 : : #endif
1616 : :
1617 : 0 : type = ((const struct rte_flow_action_of_push_vlan *)
1618 : 0 : actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
1619 : 0 : vid = ((const struct rte_flow_action_of_set_vlan_vid *)
1620 : 0 : actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
1621 : : pcp = is_of_vlan_pcp_present(actions) ?
1622 : : ((const struct rte_flow_action_of_set_vlan_pcp *)
1623 [ # # # # ]: 0 : actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
1624 : : #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1625 : 0 : vid_hi = vid & 0xff;
1626 : : vid_lo = vid >> 8;
1627 : 0 : return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type;
1628 : : #else
1629 : : return (type << 16) | (pcp << 13) | vid;
1630 : : #endif
1631 : : }
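 : :
 : : /*
 : :  * Worked example (illustrative only): for ethertype 0x8100, VID 0x123 and
 : :  * PCP 3, both byte-order branches yield the big-endian word 0x81006123,
 : :  * i.e. TPID 0x8100 followed by TCI (PCP << 13 | VID) == 0x6123, matching
 : :  * the on-wire VLAN header layout consumed by the push_vlan DR action.
 : :  */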
1632 : :
1633 : : static __rte_always_inline struct mlx5_aso_mtr *
1634 : : flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
1635 : : const struct rte_flow_action *action,
1636 : : struct mlx5_hw_q_job *job, bool push,
1637 : : struct rte_flow_error *error)
1638 : : {
1639 : : struct mlx5_priv *priv = dev->data->dev_private;
1640 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1641 : 0 : const struct rte_flow_action_meter_mark *meter_mark = action->conf;
1642 : : struct mlx5_aso_mtr *aso_mtr;
1643 : : struct mlx5_flow_meter_info *fm;
1644 : : uint32_t mtr_id;
1645 : : uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
1646 : : MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1647 : :
1648 : 0 : if (priv->shared_host) {
1649 : 0 : rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1650 : : "Meter mark actions can only be created on the host port");
1651 : : return NULL;
1652 : : }
1653 [ # # # # : 0 : if (meter_mark->profile == NULL)
# # # # #
# ]
1654 : : return NULL;
1655 : 0 : aso_mtr = mlx5_ipool_malloc(priv->hws_mpool->idx_pool, &mtr_id);
1656 [ # # # # : 0 : if (!aso_mtr)
# # # # #
# ]
1657 : : return NULL;
1658 : : /* Fill the flow meter parameters. */
1659 : 0 : aso_mtr->type = ASO_METER_INDIRECT;
1660 : : fm = &aso_mtr->fm;
1661 : 0 : fm->meter_id = mtr_id;
1662 : 0 : fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
1663 : 0 : fm->is_enable = meter_mark->state;
1664 : 0 : fm->color_aware = meter_mark->color_mode;
1665 : 0 : aso_mtr->pool = pool;
1666 [ # # ]: 0 : aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
1667 : : ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
1668 : 0 : aso_mtr->offset = mtr_id - 1;
1669 [ # # # # : 0 : aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;
# # # # #
# ]
1670 : 0 : job->action = (void *)(handle | mtr_id);
1671 : : /* Update ASO flow meter by wqe. */
1672 [ # # # # : 0 : if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
# # # # #
# ]
1673 : : &priv->mtr_bulk, job, push)) {
1674 : 0 : mlx5_ipool_free(pool->idx_pool, mtr_id);
1675 : : return NULL;
1676 : : }
1677 : : /* Wait for ASO object completion. */
1678 [ # # # # : 0 : if (queue == MLX5_HW_INV_QUEUE &&
# # # # #
# # # ]
1679 : 0 : mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
1680 : 0 : mlx5_ipool_free(pool->idx_pool, mtr_id);
1681 : : return NULL;
1682 : : }
1683 : : return aso_mtr;
1684 : : }
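 : :
 : : /*
 : :  * Note (assumption): mlx5_ipool_malloc() returns indices starting from 1,
 : :  * hence "aso_mtr->offset = mtr_id - 1" above maps the meter to a
 : :  * zero-based offset within the bulk ASO object.
 : :  */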
1685 : :
1686 : : static __rte_always_inline int
1687 : : flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
1688 : : uint16_t aso_mtr_pos,
1689 : : const struct rte_flow_action *action,
1690 : : struct mlx5dr_rule_action *acts,
1691 : : uint32_t *index,
1692 : : uint32_t queue,
1693 : : struct rte_flow_error *error)
1694 : : {
1695 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1696 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
1697 : : struct mlx5_aso_mtr *aso_mtr;
1698 : : struct mlx5_hw_q_job *job =
1699 : : flow_hw_action_job_init(priv, queue, NULL, NULL, NULL,
1700 : : MLX5_HW_Q_JOB_TYPE_CREATE,
1701 : : MLX5_HW_INDIRECT_TYPE_LEGACY, NULL);
1702 : :
1703 : : if (!job)
1704 : : return -1;
1705 : : aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job,
1706 : : true, error);
1707 : : if (!aso_mtr) {
1708 : : flow_hw_job_put(priv, job, queue);
1709 : : return -1;
1710 : : }
1711 : :
1712 : : /* Compile METER_MARK action */
1713 : 0 : acts[aso_mtr_pos].action = pool->action;
1714 : 0 : acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
1715 : 0 : *index = aso_mtr->fm.meter_id;
1716 : : return 0;
1717 : : }
1718 : :
1719 : : static int
1720 : 0 : flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
1721 : : __rte_unused const struct mlx5_action_construct_data *act_data,
1722 : : const struct rte_flow_action *action,
1723 : : struct mlx5dr_rule_action *dr_rule)
1724 : : {
1725 : 0 : const struct rte_flow_action_indirect_list *list_conf = action->conf;
1726 : 0 : const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
1727 : :
1728 : 0 : dr_rule->action = mirror->mirror_action;
1729 : 0 : return 0;
1730 : : }
1731 : :
1732 : : /**
1733 : : * HWS mirror is implemented as a FW island (a standalone FW object).
1734 : : * The action does not support per-flow indirect list configuration.
1735 : : * If the template handle was masked, the handle's mirror action is used
1736 : : * in all flow rules; otherwise each flow rule specifies its own handle.
1737 : : */
1738 : : static int
1739 : 0 : hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
1740 : : const struct rte_flow_action *action,
1741 : : const struct rte_flow_action *mask,
1742 : : struct mlx5_hw_actions *acts,
1743 : : uint16_t action_src, uint16_t action_dst)
1744 : : {
1745 : : int ret = 0;
1746 : 0 : const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1747 : :
1748 [ # # # # ]: 0 : if (mask_conf && mask_conf->handle) {
1749 : : /**
1750 : : * If mirror handle was masked, assign fixed DR5 mirror action.
1751 : : */
1752 : : flow_hw_translate_indirect_mirror(dev, NULL, action,
1753 : 0 : &acts->rule_acts[action_dst]);
1754 : : } else {
1755 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1756 : : ret = flow_hw_act_data_indirect_list_append
1757 : : (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1758 : : action_src, action_dst,
1759 : : flow_hw_translate_indirect_mirror);
1760 : : }
1761 : 0 : return ret;
1762 : : }
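 : :
 : : /*
 : :  * Usage sketch (hypothetical application code, assuming a mirror list
 : :  * handle "h" created beforehand): a non-zero handle in the mask fixes
 : :  * the DR mirror action at template translation time.
 : :  *
 : :  * @code{.c}
 : :  * const struct rte_flow_action_indirect_list conf = { .handle = h };
 : :  * const struct rte_flow_action_indirect_list mask = {
 : :  *         .handle = (struct rte_flow_action_list_handle *)UINTPTR_MAX,
 : :  * };
 : :  * // actions[i] = { .type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
 : :  * //                .conf = &conf };
 : :  * // masks[i] = { .type = RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
 : :  * //              .conf = &mask };
 : :  * @endcode
 : :  */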
1763 : :
1764 : : static int
1765 : 0 : flow_hw_reformat_action(__rte_unused struct rte_eth_dev *dev,
1766 : : __rte_unused const struct mlx5_action_construct_data *data,
1767 : : const struct rte_flow_action *action,
1768 : : struct mlx5dr_rule_action *dr_rule)
1769 : : {
1770 : 0 : const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
1771 : :
1772 : 0 : dr_rule->action = ((struct mlx5_hw_encap_decap_action *)
1773 : 0 : (indlst_conf->handle))->action;
1774 [ # # ]: 0 : if (!dr_rule->action)
1775 : 0 : return -EINVAL;
1776 : : return 0;
1777 : : }
1778 : :
1779 : : /**
1780 : : * Template conf must not be masked. If the handle is masked, use the
1781 : : * action from the template; otherwise update it per flow rule.
1782 : : */
1783 : : static int
1784 : 0 : hws_table_tmpl_translate_indirect_reformat(struct rte_eth_dev *dev,
1785 : : const struct rte_flow_action *action,
1786 : : const struct rte_flow_action *mask,
1787 : : struct mlx5_hw_actions *acts,
1788 : : uint16_t action_src, uint16_t action_dst)
1789 : : {
1790 : : int ret = -1;
1791 : 0 : const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1792 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1793 : :
1794 [ # # # # : 0 : if (mask_conf && mask_conf->handle && !mask_conf->conf)
# # ]
1795 : : /**
1796 : : * If handle was masked, assign fixed DR action.
1797 : : */
1798 : : ret = flow_hw_reformat_action(dev, NULL, action,
1799 [ # # ]: 0 : &acts->rule_acts[action_dst]);
1800 [ # # # # : 0 : else if (mask_conf && !mask_conf->handle && !mask_conf->conf)
# # ]
1801 : : ret = flow_hw_act_data_indirect_list_append
1802 : : (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1803 : : action_src, action_dst, flow_hw_reformat_action);
1804 : 0 : return ret;
1805 : : }
1806 : :
1807 : : static int
1808 : 0 : flow_dr_set_meter(struct mlx5_priv *priv,
1809 : : struct mlx5dr_rule_action *dr_rule,
1810 : : const struct rte_flow_action_indirect_list *action_conf)
1811 : : {
1812 : 0 : const struct mlx5_indlst_legacy *legacy_obj =
1813 : : (typeof(legacy_obj))action_conf->handle;
1814 : 0 : struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
1815 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
1816 : 0 : uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
1817 : 0 : struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
1818 : :
1819 [ # # ]: 0 : if (!aso_mtr)
1820 : : return -EINVAL;
1821 : 0 : dr_rule->action = mtr_pool->action;
1822 : 0 : dr_rule->aso_meter.offset = aso_mtr->offset;
1823 : 0 : return 0;
1824 : : }
1825 : :
1826 : : __rte_always_inline static void
1827 : : flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
1828 : : {
1829 : 0 : dr_rule->aso_meter.init_color =
1830 : 0 : (enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
1831 : 0 : }
1832 : :
1833 : : static int
1834 : 0 : flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
1835 : : const struct mlx5_action_construct_data *act_data,
1836 : : const struct rte_flow_action *action,
1837 : : struct mlx5dr_rule_action *dr_rule)
1838 : : {
1839 : : int ret;
1840 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1841 : 0 : const struct rte_flow_action_indirect_list *action_conf = action->conf;
1842 : 0 : const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
1843 : : (typeof(flow_conf))action_conf->conf;
1844 : :
1845 : 0 : ret = flow_dr_set_meter(priv, dr_rule, action_conf);
1846 [ # # ]: 0 : if (ret)
1847 : : return ret;
1848 [ # # ]: 0 : if (!act_data->shared_meter.conf_masked) {
1849 [ # # # # : 0 : if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
# # ]
1850 : : flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
1851 : : }
1852 : : return 0;
1853 : : }
1854 : :
1855 : : static int
1856 : 0 : hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
1857 : : const struct rte_flow_action *action,
1858 : : const struct rte_flow_action *mask,
1859 : : struct mlx5_hw_actions *acts,
1860 : : uint16_t action_src, uint16_t action_dst)
1861 : : {
1862 : : int ret;
1863 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1864 : 0 : const struct rte_flow_action_indirect_list *action_conf = action->conf;
1865 : 0 : const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
1866 [ # # # # ]: 0 : bool is_handle_masked = mask_conf && mask_conf->handle;
1867 [ # # # # : 0 : bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
# # ]
1868 : 0 : struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
1869 : :
1870 [ # # ]: 0 : if (is_handle_masked) {
1871 : 0 : ret = flow_dr_set_meter(priv, dr_rule, action->conf);
1872 [ # # ]: 0 : if (ret)
1873 : : return ret;
1874 : : }
1875 [ # # ]: 0 : if (is_conf_masked) {
1876 : : const struct
1877 : 0 : rte_flow_indirect_update_flow_meter_mark **flow_conf =
1878 : : (typeof(flow_conf))action_conf->conf;
1879 : : flow_dr_mtr_flow_color(dr_rule,
1880 [ # # ]: 0 : flow_conf[0]->init_color);
1881 : : }
1882 [ # # ]: 0 : if (!is_handle_masked || !is_conf_masked) {
1883 : : struct mlx5_action_construct_data *act_data;
1884 : :
1885 : : ret = flow_hw_act_data_indirect_list_append
1886 : : (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
1887 : : action_src, action_dst, flow_hw_translate_indirect_meter);
1888 : : if (ret)
1889 : 0 : return ret;
1890 : : act_data = LIST_FIRST(&acts->act_list);
1891 : 0 : act_data->shared_meter.conf_masked = is_conf_masked;
1892 : : }
1893 : : return 0;
1894 : : }
1895 : :
1896 : : static int
1897 : : hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
1898 : : const struct rte_flow_action *action,
1899 : : const struct rte_flow_action *mask,
1900 : : struct mlx5_hw_actions *acts,
1901 : : uint16_t action_src, uint16_t action_dst)
1902 : : {
1903 : : int ret;
1904 : : const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
1905 : : struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
1906 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
1907 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
1908 : :
1909 : 0 : switch (type) {
1910 : 0 : case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
1911 : 0 : ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
1912 : : acts, action_src,
1913 : : action_dst);
1914 : 0 : break;
1915 : : default:
1916 : : ret = -EINVAL;
1917 : : break;
1918 : : }
1919 : : return ret;
1920 : : }
1921 : :
1922 : : /*
1923 : : * template .. indirect_list handle Ht conf Ct ..
1924 : : * mask .. indirect_list handle Hm conf Cm ..
1925 : : *
1926 : : * PMD requires Ht != 0 to resolve handle type.
1927 : : * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
1928 : : * not change. Otherwise, DR5 action will be resolved during flow rule build.
1929 : : * If Ct was masked (Cm != 0), table template processing updates base
1930 : : * indirect action configuration with Ct parameters.
1931 : : */
1932 : : static int
1933 : 0 : table_template_translate_indirect_list(struct rte_eth_dev *dev,
1934 : : const struct rte_flow_action *action,
1935 : : const struct rte_flow_action *mask,
1936 : : struct mlx5_hw_actions *acts,
1937 : : uint16_t action_src, uint16_t action_dst)
1938 : : {
1939 : : int ret = 0;
1940 : : enum mlx5_indirect_list_type type;
1941 : 0 : const struct rte_flow_action_indirect_list *list_conf = action->conf;
1942 : :
1943 [ # # # # ]: 0 : if (!list_conf || !list_conf->handle)
1944 : : return -EINVAL;
1945 : : type = mlx5_get_indirect_list_type(list_conf->handle);
1946 [ # # # # ]: 0 : switch (type) {
1947 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
1948 [ # # ]: 0 : ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
1949 : : acts, action_src,
1950 : : action_dst);
1951 : : break;
1952 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
1953 : 0 : ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
1954 : : acts, action_src,
1955 : : action_dst);
1956 : 0 : break;
1957 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
1958 [ # # ]: 0 : if (list_conf->conf)
1959 : : return -EINVAL;
1960 : 0 : ret = hws_table_tmpl_translate_indirect_reformat(dev, action, mask,
1961 : : acts, action_src,
1962 : : action_dst);
1963 : 0 : break;
1964 : : default:
1965 : : return -EINVAL;
1966 : : }
1967 : : return ret;
1968 : : }
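 : :
 : : /*
 : :  * Summary of the resolution rules above (Ht must always be non-zero):
 : :  *
 : :  *   Hm != 0, Cm != 0 -> DR action and configuration fixed at template time.
 : :  *   Hm != 0, Cm == 0 -> DR action fixed, configuration set per flow rule.
 : :  *   Hm == 0          -> DR action resolved during flow rule build.
 : :  */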
1969 : :
1970 : : static int
1971 [ # # ]: 0 : mlx5_tbl_translate_reformat(struct mlx5_priv *priv,
1972 : : const struct rte_flow_template_table_attr *table_attr,
1973 : : struct mlx5_hw_actions *acts,
1974 : : struct rte_flow_actions_template *at,
1975 : : const struct rte_flow_item *enc_item,
1976 : : const struct rte_flow_item *enc_item_m,
1977 : : uint8_t *encap_data, uint8_t *encap_data_m,
1978 : : struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
1979 : : size_t data_size, uint16_t reformat_src,
1980 : : enum mlx5dr_action_type refmt_type,
1981 : : struct rte_flow_error *error)
1982 : : {
1983 : : int mp_reformat_ix = mlx5_multi_pattern_reformat_to_index(refmt_type);
1984 : : const struct rte_flow_attr *attr = &table_attr->flow_attr;
1985 : : enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
1986 : : struct mlx5dr_action_reformat_header hdr;
1987 : : uint8_t buf[MLX5_ENCAP_MAX_LEN];
1988 : : bool shared_rfmt = false;
1989 : : int ret;
1990 : :
1991 : : MLX5_ASSERT(at->reformat_off != UINT16_MAX);
1992 [ # # ]: 0 : if (enc_item) {
1993 : : MLX5_ASSERT(!encap_data);
1994 : 0 : ret = flow_dv_convert_encap_data(enc_item, buf, &data_size, error);
1995 [ # # ]: 0 : if (ret)
1996 : : return ret;
1997 : : encap_data = buf;
1998 [ # # ]: 0 : if (enc_item_m)
1999 : : shared_rfmt = true;
2000 [ # # ]: 0 : } else if (encap_data && encap_data_m) {
2001 : : shared_rfmt = true;
2002 : : }
2003 : 0 : acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
2004 : : sizeof(*acts->encap_decap) + data_size,
2005 : : 0, SOCKET_ID_ANY);
2006 [ # # ]: 0 : if (!acts->encap_decap)
2007 : 0 : return rte_flow_error_set(error, ENOMEM,
2008 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2009 : : NULL, "no memory for reformat context");
2010 : 0 : hdr.sz = data_size;
2011 : 0 : hdr.data = encap_data;
2012 [ # # ]: 0 : if (shared_rfmt || mp_reformat_ix < 0) {
2013 : 0 : uint16_t reformat_ix = at->reformat_off;
2014 : 0 : uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
2015 : : MLX5DR_ACTION_FLAG_SHARED;
2016 : :
2017 : 0 : acts->encap_decap->action =
2018 : 0 : mlx5dr_action_create_reformat(priv->dr_ctx, refmt_type,
2019 : : 1, &hdr, 0, flags);
2020 [ # # ]: 0 : if (!acts->encap_decap->action)
2021 : 0 : return -rte_errno;
2022 : 0 : acts->rule_acts[reformat_ix].action = acts->encap_decap->action;
2023 : 0 : acts->rule_acts[reformat_ix].reformat.data = acts->encap_decap->data;
2024 : 0 : acts->rule_acts[reformat_ix].reformat.offset = 0;
2025 : 0 : acts->encap_decap->shared = true;
2026 : : } else {
2027 : : uint32_t ix;
2028 : 0 : typeof(mp_ctx->reformat[0]) *reformat = mp_ctx->reformat +
2029 : : mp_reformat_ix;
2030 : :
2031 : 0 : ix = reformat->elements_num++;
2032 : 0 : reformat->reformat_hdr[ix] = hdr;
2033 : 0 : acts->rule_acts[at->reformat_off].reformat.hdr_idx = ix;
2034 : 0 : acts->encap_decap_pos = at->reformat_off;
2035 : 0 : acts->encap_decap->multi_pattern = 1;
2036 : 0 : acts->encap_decap->data_size = data_size;
2037 : 0 : acts->encap_decap->action_type = refmt_type;
2038 : 0 : ret = __flow_hw_act_data_encap_append
2039 : 0 : (priv, acts, (at->actions + reformat_src)->type,
2040 : : reformat_src, at->reformat_off, data_size);
2041 : : if (ret)
2042 : 0 : return -rte_errno;
2043 : : mlx5_multi_pattern_activate(mp_ctx);
2044 : : }
2045 : : return 0;
2046 : : }
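 : :
 : : /*
 : :  * Decision recap (illustrative): the reformat becomes a SHARED DR action
 : :  * when the encap data was fully masked in the template or the reformat
 : :  * type has no multi-pattern slot (mp_reformat_ix < 0); otherwise the
 : :  * header is queued in the multi-pattern context and the per-rule data is
 : :  * appended during flow construction.
 : :  */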
2047 : :
2048 : : static int
2049 : 0 : mlx5_tbl_translate_modify_header(struct rte_eth_dev *dev,
2050 : : const struct mlx5_flow_template_table_cfg *cfg,
2051 : : struct mlx5_hw_actions *acts,
2052 : : struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2053 : : struct mlx5_hw_modify_header_action *mhdr,
2054 : : struct rte_flow_error *error)
2055 : : {
2056 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
2057 : : const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2058 : : const struct rte_flow_attr *attr = &table_attr->flow_attr;
2059 : : enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr);
2060 : 0 : uint16_t mhdr_ix = mhdr->pos;
2061 : 0 : struct mlx5dr_action_mh_pattern pattern = {
2062 : 0 : .sz = sizeof(struct mlx5_modification_cmd) * mhdr->mhdr_cmds_num
2063 : : };
2064 : :
2065 [ # # ]: 0 : if (flow_hw_validate_compiled_modify_field(dev, cfg, mhdr, error)) {
2066 : 0 : __flow_hw_action_template_destroy(dev, acts);
2067 : 0 : return -rte_errno;
2068 : : }
2069 : 0 : acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
2070 : : 0, SOCKET_ID_ANY);
2071 [ # # ]: 0 : if (!acts->mhdr)
2072 : 0 : return rte_flow_error_set(error, ENOMEM,
2073 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2074 : : NULL, "translate modify_header: no memory for modify header context");
2075 : : rte_memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
2076 : 0 : pattern.data = (__be64 *)acts->mhdr->mhdr_cmds;
2077 [ # # ]: 0 : if (mhdr->shared) {
2078 : 0 : uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
2079 : : MLX5DR_ACTION_FLAG_SHARED;
2080 : :
2081 : 0 : acts->mhdr->action = mlx5dr_action_create_modify_header
2082 : : (priv->dr_ctx, 1, &pattern, 0,
2083 : : flags);
2084 [ # # ]: 0 : if (!acts->mhdr->action)
2085 : 0 : return rte_flow_error_set(error, rte_errno,
2086 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2087 : : NULL, "translate modify_header: failed to create DR action");
2088 : 0 : acts->rule_acts[mhdr_ix].action = acts->mhdr->action;
2089 : : } else {
2090 : : typeof(mp_ctx->mh) *mh = &mp_ctx->mh;
2091 : 0 : uint32_t idx = mh->elements_num;
2092 : :
2093 : 0 : mh->pattern[mh->elements_num++] = pattern;
2094 : 0 : acts->mhdr->multi_pattern = 1;
2095 : 0 : acts->rule_acts[mhdr_ix].modify_header.pattern_idx = idx;
2096 : : mlx5_multi_pattern_activate(mp_ctx);
2097 : : }
2098 : : return 0;
2099 : : }
2100 : :
2101 : :
2102 : : static int
2103 : 0 : mlx5_create_ipv6_ext_reformat(struct rte_eth_dev *dev,
2104 : : const struct mlx5_flow_template_table_cfg *cfg,
2105 : : struct mlx5_hw_actions *acts,
2106 : : struct rte_flow_actions_template *at,
2107 : : uint8_t *push_data, uint8_t *push_data_m,
2108 : : size_t push_size, uint16_t recom_src,
2109 : : enum mlx5dr_action_type recom_type)
2110 : : {
2111 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
2112 : : const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2113 : : const struct rte_flow_attr *attr = &table_attr->flow_attr;
2114 : : enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
2115 : : struct mlx5_action_construct_data *act_data;
2116 : 0 : struct mlx5dr_action_reformat_header hdr = {0};
2117 : : uint32_t flag, bulk = 0;
2118 : :
2119 : 0 : flag = mlx5_hw_act_flag[!!attr->group][type];
2120 : 0 : acts->push_remove = mlx5_malloc(MLX5_MEM_ZERO,
2121 : : sizeof(*acts->push_remove) + push_size,
2122 : : 0, SOCKET_ID_ANY);
2123 [ # # ]: 0 : if (!acts->push_remove)
2124 : : return -ENOMEM;
2125 : :
2126 [ # # # ]: 0 : switch (recom_type) {
2127 : 0 : case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
2128 [ # # ]: 0 : if (!push_data || !push_size)
2129 : 0 : goto err1;
2130 [ # # ]: 0 : if (!push_data_m) {
2131 [ # # ]: 0 : bulk = rte_log2_u32(table_attr->nb_flows);
2132 : : } else {
2133 : 0 : flag |= MLX5DR_ACTION_FLAG_SHARED;
2134 : 0 : acts->push_remove->shared = 1;
2135 : : }
2136 : 0 : acts->push_remove->data_size = push_size;
2137 : 0 : memcpy(acts->push_remove->data, push_data, push_size);
2138 : 0 : hdr.data = push_data;
2139 : 0 : hdr.sz = push_size;
2140 : 0 : break;
2141 : 0 : case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
2142 : 0 : flag |= MLX5DR_ACTION_FLAG_SHARED;
2143 : 0 : acts->push_remove->shared = 1;
2144 : 0 : break;
2145 : : default:
2146 : : break;
2147 : : }
2148 : :
2149 : 0 : acts->push_remove->action =
2150 : 0 : mlx5dr_action_create_reformat_ipv6_ext(priv->dr_ctx,
2151 : : recom_type, &hdr, bulk, flag);
2152 [ # # ]: 0 : if (!acts->push_remove->action)
2153 : 0 : goto err1;
2154 : 0 : acts->rule_acts[at->recom_off].action = acts->push_remove->action;
2155 : 0 : acts->rule_acts[at->recom_off].ipv6_ext.header = acts->push_remove->data;
2156 : 0 : acts->rule_acts[at->recom_off].ipv6_ext.offset = 0;
2157 : 0 : acts->push_remove_pos = at->recom_off;
2158 [ # # ]: 0 : if (!acts->push_remove->shared) {
2159 : 0 : act_data = __flow_hw_act_data_push_append(dev, acts,
2160 : : RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH,
2161 : : recom_src, at->recom_off, push_size);
2162 : : if (!act_data)
2163 : 0 : goto err;
2164 : : }
2165 : : return 0;
2166 : : err:
2167 [ # # ]: 0 : if (acts->push_remove->action)
2168 : 0 : mlx5dr_action_destroy(acts->push_remove->action);
2169 : 0 : err1:
2170 [ # # ]: 0 : if (acts->push_remove) {
2171 : 0 : mlx5_free(acts->push_remove);
2172 : 0 : acts->push_remove = NULL;
2173 : : }
2174 : : return -EINVAL;
2175 : : }
2176 : :
2177 : : /**
2178 : : * Translate rte_flow actions to DR action.
2179 : : *
2180 : : * As the action template has already indicated the actions, translate
2181 : : * the rte_flow actions to DR actions where possible, so that the flow
2182 : : * creation stage saves the cycles otherwise spent organizing actions.
2183 : : * Actions with incomplete information at this stage are added to a
2184 : : * list and resolved during flow rule construction.
2185 : : *
2186 : : * @param[in] dev
2187 : : * Pointer to the rte_eth_dev structure.
2188 : : * @param[in] cfg
2189 : : * Pointer to the table configuration.
2190 : : * @param[in/out] acts
2191 : : * Pointer to the template HW steering DR actions.
2192 : : * @param[in] at
2193 : : * Action template.
2194 : : * @param[out] error
2195 : : * Pointer to error structure.
2196 : : *
2197 : : * @return
2198 : : * 0 on success, a negative errno otherwise and rte_errno is set.
2199 : : */
2200 : : static int
2201 : 0 : __flow_hw_actions_translate(struct rte_eth_dev *dev,
2202 : : const struct mlx5_flow_template_table_cfg *cfg,
2203 : : struct mlx5_hw_actions *acts,
2204 : : struct rte_flow_actions_template *at,
2205 : : struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2206 : : struct rte_flow_error *error)
2207 : : {
2208 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2209 : 0 : const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2210 : 0 : struct mlx5_hca_flex_attr *hca_attr = &priv->sh->cdev->config.hca_attr.flex;
2211 : 0 : const struct rte_flow_attr *attr = &table_attr->flow_attr;
2212 : 0 : struct rte_flow_action *actions = at->actions;
2213 : 0 : struct rte_flow_action *masks = at->masks;
2214 : : enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
2215 : : enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
2216 : : const struct rte_flow_action_raw_encap *raw_encap_data;
2217 : : const struct rte_flow_action_ipv6_ext_push *ipv6_ext_data;
2218 : : const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
2219 : : uint16_t reformat_src = 0, recom_src = 0;
2220 : : uint8_t *encap_data = NULL, *encap_data_m = NULL;
2221 : : uint8_t *push_data = NULL, *push_data_m = NULL;
2222 : : size_t data_size = 0, push_size = 0;
2223 : : struct mlx5_hw_modify_header_action mhdr = { 0 };
2224 : : bool actions_end = false;
2225 : : uint32_t type;
2226 : : bool reformat_used = false;
2227 : : bool recom_used = false;
2228 : : unsigned int of_vlan_offset;
2229 : : uint16_t jump_pos;
2230 : : uint32_t ct_idx;
2231 : : int ret, err;
2232 [ # # ]: 0 : uint32_t target_grp = 0;
2233 : : int table_type;
2234 : :
2235 : : flow_hw_modify_field_init(&mhdr, at);
2236 [ # # ]: 0 : if (attr->transfer)
2237 : : type = MLX5DR_TABLE_TYPE_FDB;
2238 [ # # ]: 0 : else if (attr->egress)
2239 : : type = MLX5DR_TABLE_TYPE_NIC_TX;
2240 : : else
2241 : : type = MLX5DR_TABLE_TYPE_NIC_RX;
2242 [ # # ]: 0 : for (; !actions_end; actions++, masks++) {
2243 : 0 : uint64_t pos = actions - at->actions;
2244 : 0 : uint16_t src_pos = pos - at->src_off[pos];
2245 : 0 : uint16_t dr_pos = at->dr_off[pos];
2246 : :
2247 [ # # # # : 0 : switch ((int)actions->type) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # #
# ]
2248 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2249 [ # # ]: 0 : if (!attr->group) {
2250 : 0 : DRV_LOG(ERR, "Indirect action is not supported in root table.");
2251 : 0 : goto err;
2252 : : }
2253 : 0 : ret = table_template_translate_indirect_list
2254 : : (dev, actions, masks, acts, src_pos, dr_pos);
2255 [ # # ]: 0 : if (ret)
2256 : 0 : goto err;
2257 : : break;
2258 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
2259 [ # # ]: 0 : if (!attr->group) {
2260 : 0 : DRV_LOG(ERR, "Indirect action is not supported in root table.");
2261 : 0 : goto err;
2262 : : }
2263 [ # # # # ]: 0 : if (actions->conf && masks->conf) {
2264 [ # # # # : 0 : if (flow_hw_shared_action_translate
# # # ]
2265 : : (dev, actions, acts, src_pos, dr_pos))
2266 : 0 : goto err;
2267 : : } else if (__flow_hw_act_data_general_append
2268 : : (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
2269 : : src_pos, dr_pos)){
2270 : 0 : goto err;
2271 : : }
2272 : : break;
2273 : : case RTE_FLOW_ACTION_TYPE_VOID:
2274 : : break;
2275 : 0 : case RTE_FLOW_ACTION_TYPE_DROP:
2276 : 0 : acts->rule_acts[dr_pos].action =
2277 : 0 : priv->hw_drop[!!attr->group];
2278 : 0 : break;
2279 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
2280 [ # # ]: 0 : if (!attr->group) {
2281 : 0 : DRV_LOG(ERR, "Port representor is not supported in root table.");
2282 : 0 : goto err;
2283 : : }
2284 : 0 : acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2285 : 0 : break;
2286 : 0 : case RTE_FLOW_ACTION_TYPE_MARK:
2287 : 0 : acts->mark = true;
2288 [ # # ]: 0 : if (masks->conf &&
2289 : : ((const struct rte_flow_action_mark *)
2290 [ # # ]: 0 : masks->conf)->id)
2291 : 0 : acts->rule_acts[dr_pos].tag.value =
2292 : : mlx5_flow_mark_set
2293 : : (((const struct rte_flow_action_mark *)
2294 [ # # ]: 0 : (actions->conf))->id);
2295 : : else if (__flow_hw_act_data_general_append(priv, acts,
2296 : : actions->type,
2297 : : src_pos, dr_pos))
2298 : 0 : goto err;
2299 : 0 : acts->rule_acts[dr_pos].action =
2300 : 0 : priv->hw_tag[!!attr->group];
2301 : 0 : __atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
2302 : 0 : flow_hw_rxq_flag_set(dev, true);
2303 : 0 : break;
2304 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2305 : 0 : acts->rule_acts[dr_pos].action =
2306 : 0 : priv->hw_push_vlan[type];
2307 [ # # # # ]: 0 : if (is_template_masked_push_vlan(masks->conf))
2308 : 0 : acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
2309 : : vlan_hdr_to_be32(actions);
2310 : : else if (__flow_hw_act_data_general_append
2311 : : (priv, acts, actions->type,
2312 : : src_pos, dr_pos))
2313 : 0 : goto err;
2314 : : of_vlan_offset = is_of_vlan_pcp_present(actions) ?
2315 [ # # ]: 0 : MLX5_HW_VLAN_PUSH_PCP_IDX :
2316 : : MLX5_HW_VLAN_PUSH_VID_IDX;
2317 : 0 : actions += of_vlan_offset;
2318 : 0 : masks += of_vlan_offset;
2319 : 0 : break;
2320 : 0 : case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2321 : 0 : acts->rule_acts[dr_pos].action =
2322 : 0 : priv->hw_pop_vlan[type];
2323 : 0 : break;
2324 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
2325 [ # # ]: 0 : if (masks->conf &&
2326 : : ((const struct rte_flow_action_jump *)
2327 [ # # ]: 0 : masks->conf)->group) {
2328 : 0 : uint32_t jump_group =
2329 : : ((const struct rte_flow_action_jump *)
2330 : 0 : actions->conf)->group;
2331 : 0 : acts->jump = flow_hw_jump_action_register
2332 : : (dev, cfg, jump_group, error);
2333 [ # # ]: 0 : if (!acts->jump)
2334 : 0 : goto err;
2335 : 0 : acts->rule_acts[dr_pos].action = (!!attr->group) ?
2336 [ # # ]: 0 : acts->jump->hws_action :
2337 : : acts->jump->root_action;
2338 : : } else if (__flow_hw_act_data_general_append
2339 : : (priv, acts, actions->type,
2340 : : src_pos, dr_pos)){
2341 : 0 : goto err;
2342 : : }
2343 : : break;
2344 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
2345 [ # # ]: 0 : if (masks->conf &&
2346 : : ((const struct rte_flow_action_queue *)
2347 [ # # ]: 0 : masks->conf)->index) {
2348 : 0 : acts->tir = flow_hw_tir_action_register
2349 : : (dev,
2350 : 0 : mlx5_hw_act_flag[!!attr->group][type],
2351 : : actions);
2352 [ # # ]: 0 : if (!acts->tir)
2353 : 0 : goto err;
2354 : 0 : acts->rule_acts[dr_pos].action =
2355 : 0 : acts->tir->action;
2356 : : } else if (__flow_hw_act_data_general_append
2357 : : (priv, acts, actions->type,
2358 : : src_pos, dr_pos)) {
2359 : 0 : goto err;
2360 : : }
2361 : : break;
2362 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
2363 [ # # # # ]: 0 : if (actions->conf && masks->conf) {
2364 : 0 : acts->tir = flow_hw_tir_action_register
2365 : : (dev,
2366 : 0 : mlx5_hw_act_flag[!!attr->group][type],
2367 : : actions);
2368 [ # # ]: 0 : if (!acts->tir)
2369 : 0 : goto err;
2370 : 0 : acts->rule_acts[dr_pos].action =
2371 : 0 : acts->tir->action;
2372 : : } else if (__flow_hw_act_data_general_append
2373 : : (priv, acts, actions->type,
2374 : : src_pos, dr_pos)) {
2375 : 0 : goto err;
2376 : : }
2377 : : break;
2378 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2379 : : MLX5_ASSERT(!reformat_used);
2380 : 0 : enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2381 : : actions->conf);
2382 [ # # ]: 0 : if (masks->conf)
2383 : 0 : enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
2384 : : masks->conf);
2385 : : reformat_used = true;
2386 : : reformat_src = src_pos;
2387 : : refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2388 : : break;
2389 : 0 : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2390 : : MLX5_ASSERT(!reformat_used);
2391 : 0 : enc_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2392 : : actions->conf);
2393 [ # # ]: 0 : if (masks->conf)
2394 : 0 : enc_item_m = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
2395 : : masks->conf);
2396 : : reformat_used = true;
2397 : : reformat_src = src_pos;
2398 : : refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2399 : : break;
2400 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2401 : 0 : raw_encap_data =
2402 : : (const struct rte_flow_action_raw_encap *)
2403 : : masks->conf;
2404 [ # # ]: 0 : if (raw_encap_data)
2405 : 0 : encap_data_m = raw_encap_data->data;
2406 : 0 : raw_encap_data =
2407 : : (const struct rte_flow_action_raw_encap *)
2408 : : actions->conf;
2409 : 0 : encap_data = raw_encap_data->data;
2410 : 0 : data_size = raw_encap_data->size;
2411 [ # # ]: 0 : if (reformat_used) {
2412 : : refmt_type = data_size <
2413 : : MLX5_ENCAPSULATION_DECISION_SIZE ?
2414 [ # # ]: 0 : MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
2415 : : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
2416 : : } else {
2417 : : reformat_used = true;
2418 : : refmt_type =
2419 : : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2420 : : }
2421 : : reformat_src = src_pos;
2422 : : break;
2423 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2424 : : case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2425 : : MLX5_ASSERT(!reformat_used);
2426 : : reformat_used = true;
2427 : : refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2428 : 0 : break;
2429 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2430 : : reformat_used = true;
2431 : : refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2432 : 0 : break;
2433 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
2434 [ # # ]: 0 : if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2435 [ # # ]: 0 : !priv->sh->srh_flex_parser.flex.mapnum) {
2436 : 0 : DRV_LOG(ERR, "SRv6 anchor is not supported.");
2437 : 0 : goto err;
2438 : : }
2439 : : MLX5_ASSERT(!recom_used && !recom_type);
2440 : : recom_used = true;
2441 : : recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
2442 : 0 : ipv6_ext_data =
2443 : : (const struct rte_flow_action_ipv6_ext_push *)masks->conf;
2444 [ # # ]: 0 : if (ipv6_ext_data)
2445 : 0 : push_data_m = ipv6_ext_data->data;
2446 : 0 : ipv6_ext_data =
2447 : : (const struct rte_flow_action_ipv6_ext_push *)actions->conf;
2448 [ # # ]: 0 : if (ipv6_ext_data) {
2449 : 0 : push_data = ipv6_ext_data->data;
2450 : 0 : push_size = ipv6_ext_data->size;
2451 : : }
2452 : : recom_src = src_pos;
2453 : : break;
2454 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
2455 [ # # ]: 0 : if (!hca_attr->query_match_sample_info || !hca_attr->parse_graph_anchor ||
2456 [ # # ]: 0 : !priv->sh->srh_flex_parser.flex.mapnum) {
2457 : 0 : DRV_LOG(ERR, "SRv6 anchor is not supported.");
2458 : 0 : goto err;
2459 : : }
2460 : : recom_used = true;
2461 : : recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
2462 : : break;
2463 : 0 : case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
2464 : 0 : flow_hw_translate_group(dev, cfg, attr->group,
2465 : : &target_grp, error);
2466 [ # # ]: 0 : if (target_grp == 0) {
2467 : 0 : __flow_hw_action_template_destroy(dev, acts);
2468 : 0 : return rte_flow_error_set(error, ENOTSUP,
2469 : : RTE_FLOW_ERROR_TYPE_ACTION,
2470 : : NULL,
2471 : : "Send to kernel action on root table is not supported in HW steering mode");
2472 : : }
2473 [ # # ]: 0 : table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
2474 [ # # ]: 0 : ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
2475 : : MLX5DR_TABLE_TYPE_FDB);
2476 : 0 : acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
2477 : 0 : break;
2478 [ # # ]: 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
2479 : : err = flow_hw_modify_field_compile(dev, attr, actions,
2480 : : masks, acts, &mhdr,
2481 : : src_pos, error);
2482 [ # # ]: 0 : if (err)
2483 : 0 : goto err;
2484 : : break;
2485 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2486 [ # # ]: 0 : if (flow_hw_represented_port_compile
2487 : : (dev, attr, actions,
2488 : : masks, acts, src_pos, dr_pos, error))
2489 : 0 : goto err;
2490 : : break;
2491 : 0 : case RTE_FLOW_ACTION_TYPE_METER:
2492 : : /*
2493 : : * METER action is compiled to 2 DR actions - ASO_METER and FT.
2494 : : * Calculated DR offset is stored only for ASO_METER and FT
2495 : : * is assumed to be the next action.
2496 : : */
2497 : 0 : jump_pos = dr_pos + 1;
2498 [ # # # # ]: 0 : if (actions->conf && masks->conf &&
2499 : : ((const struct rte_flow_action_meter *)
2500 [ # # ]: 0 : masks->conf)->mtr_id) {
2501 : 0 : err = flow_hw_meter_compile(dev, cfg,
2502 : : dr_pos, jump_pos, actions, acts, error);
2503 : : if (err)
2504 : 0 : goto err;
2505 : : } else if (__flow_hw_act_data_general_append(priv, acts,
2506 : : actions->type,
2507 : : src_pos,
2508 : : dr_pos))
2509 : 0 : goto err;
2510 : : break;
2511 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
2512 : 0 : flow_hw_translate_group(dev, cfg, attr->group,
2513 : : &target_grp, error);
2514 [ # # ]: 0 : if (target_grp == 0) {
2515 : 0 : __flow_hw_action_template_destroy(dev, acts);
2516 : 0 : return rte_flow_error_set(error, ENOTSUP,
2517 : : RTE_FLOW_ERROR_TYPE_ACTION,
2518 : : NULL,
2519 : : "Age action on root table is not supported in HW steering mode");
2520 : : }
2521 : 0 : if (__flow_hw_act_data_general_append(priv, acts,
2522 : : actions->type,
2523 : : src_pos,
2524 : : dr_pos))
2525 : 0 : goto err;
2526 : : break;
2527 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
2528 : 0 : flow_hw_translate_group(dev, cfg, attr->group,
2529 : : &target_grp, error);
2530 [ # # ]: 0 : if (target_grp == 0) {
2531 : 0 : __flow_hw_action_template_destroy(dev, acts);
2532 : 0 : return rte_flow_error_set(error, ENOTSUP,
2533 : : RTE_FLOW_ERROR_TYPE_ACTION,
2534 : : NULL,
2535 : : "Counter action on root table is not supported in HW steering mode");
2536 : : }
2537 [ # # ]: 0 : if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
2538 : : (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
2539 : : /*
2540 : : * When both COUNT and AGE are requested, only the
2541 : : * AGE action is saved, since it also creates
2542 : : * the counter.
2543 : : */
2544 : : break;
2545 [ # # ]: 0 : if (masks->conf &&
2546 : : ((const struct rte_flow_action_count *)
2547 [ # # ]: 0 : masks->conf)->id) {
2548 [ # # ]: 0 : err = flow_hw_cnt_compile(dev, dr_pos, acts);
2549 : : if (err)
2550 : 0 : goto err;
2551 : 0 : } else if (__flow_hw_act_data_general_append
2552 : : (priv, acts, actions->type,
2553 : : src_pos, dr_pos)) {
2554 : 0 : goto err;
2555 : : }
2556 : : break;
2557 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
2558 [ # # ]: 0 : if (masks->conf) {
2559 : 0 : ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(actions->conf);
2560 : : if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
2561 : 0 : &acts->rule_acts[dr_pos]))
2562 : 0 : goto err;
2563 : : } else if (__flow_hw_act_data_general_append
2564 : : (priv, acts, actions->type,
2565 : : src_pos, dr_pos)) {
2566 : 0 : goto err;
2567 : : }
2568 : : break;
2569 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
2570 [ # # # # ]: 0 : if (actions->conf && masks->conf &&
2571 : : ((const struct rte_flow_action_meter_mark *)
2572 [ # # ]: 0 : masks->conf)->profile) {
2573 : : err = flow_hw_meter_mark_compile(dev,
2574 : : dr_pos, actions,
2575 : 0 : acts->rule_acts,
2576 : : &acts->mtr_id,
2577 : : MLX5_HW_INV_QUEUE,
2578 : : error);
2579 : : if (err)
2580 : 0 : goto err;
2581 : : } else if (__flow_hw_act_data_general_append(priv, acts,
2582 : : actions->type,
2583 : : src_pos,
2584 : : dr_pos))
2585 : 0 : goto err;
2586 : : break;
2587 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
2588 : : /* Internal, can be skipped. */
2589 [ # # ]: 0 : if (!!attr->group) {
2590 : 0 : DRV_LOG(ERR, "DEFAULT MISS action is only"
2591 : : " supported in root table.");
2592 : 0 : goto err;
2593 : : }
2594 : 0 : acts->rule_acts[dr_pos].action = priv->hw_def_miss;
2595 : 0 : break;
2596 : 0 : case RTE_FLOW_ACTION_TYPE_NAT64:
2597 [ # # ]: 0 : if (masks->conf &&
2598 [ # # ]: 0 : ((const struct rte_flow_action_nat64 *)masks->conf)->type) {
2599 : 0 : const struct rte_flow_action_nat64 *nat64_c =
2600 : : (const struct rte_flow_action_nat64 *)actions->conf;
2601 : :
2602 : 0 : acts->rule_acts[dr_pos].action =
2603 : 0 : priv->action_nat64[type][nat64_c->type];
2604 : : } else if (__flow_hw_act_data_general_append(priv, acts,
2605 : : actions->type,
2606 : : src_pos, dr_pos))
2607 : 0 : goto err;
2608 : : break;
2609 : 0 : case RTE_FLOW_ACTION_TYPE_END:
2610 : : actions_end = true;
2611 : 0 : break;
2612 : : default:
2613 : : break;
2614 : : }
2615 : : }
2616 [ # # ]: 0 : if (mhdr.pos != UINT16_MAX) {
2617 : 0 : ret = mlx5_tbl_translate_modify_header(dev, cfg, acts, mp_ctx,
2618 : : &mhdr, error);
2619 [ # # ]: 0 : if (ret)
2620 : 0 : goto err;
2621 : : }
2622 [ # # ]: 0 : if (reformat_used) {
2623 : 0 : ret = mlx5_tbl_translate_reformat(priv, table_attr, acts, at,
2624 : : enc_item, enc_item_m,
2625 : : encap_data, encap_data_m,
2626 : : mp_ctx, data_size,
2627 : : reformat_src,
2628 : : refmt_type, error);
2629 [ # # ]: 0 : if (ret)
2630 : 0 : goto err;
2631 : : }
2632 [ # # ]: 0 : if (recom_used) {
2633 : : MLX5_ASSERT(at->recom_off != UINT16_MAX);
2634 : 0 : ret = mlx5_create_ipv6_ext_reformat(dev, cfg, acts, at, push_data,
2635 : : push_data_m, push_size, recom_src,
2636 : : recom_type);
2637 [ # # ]: 0 : if (ret)
2638 : 0 : goto err;
2639 : : }
2640 : : return 0;
2641 : 0 : err:
2642 : 0 : err = rte_errno;
2643 : 0 : __flow_hw_action_template_destroy(dev, acts);
2644 : 0 : return rte_flow_error_set(error, err,
2645 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2646 : : "fail to create rte table");
2647 : : }
2648 : :
2649 : : static __rte_always_inline struct mlx5dr_rule_action *
2650 : : flow_hw_get_dr_action_buffer(struct mlx5_priv *priv,
2651 : : struct rte_flow_template_table *table,
2652 : : uint8_t action_template_index,
2653 : : uint32_t queue)
2654 : : {
2655 : 0 : uint32_t offset = action_template_index * priv->nb_queue + queue;
2656 : :
2657 : 0 : return &table->rule_acts[offset].acts[0];
2658 : : }
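 : :
 : : /*
 : :  * Example (illustrative only): with priv->nb_queue == 4, action template
 : :  * index 2 and queue 1, the offset is 2 * 4 + 1 == 9, so every (template,
 : :  * queue) pair owns a private mlx5dr_rule_action buffer and flow queues
 : :  * never contend on the cached rule actions.
 : :  */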
2659 : :
2660 : : static void
2661 : 0 : flow_hw_populate_rule_acts_caches(struct rte_eth_dev *dev,
2662 : : struct rte_flow_template_table *table,
2663 : : uint8_t at_idx)
2664 : : {
2665 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2666 : : uint32_t q;
2667 : :
2668 [ # # ]: 0 : for (q = 0; q < priv->nb_queue; ++q) {
2669 : : struct mlx5dr_rule_action *rule_acts =
2670 : 0 : flow_hw_get_dr_action_buffer(priv, table, at_idx, q);
2671 : :
2672 [ # # ]: 0 : rte_memcpy(rule_acts, table->ats[at_idx].acts.rule_acts,
2673 : : sizeof(table->ats[at_idx].acts.rule_acts));
2674 : : }
2675 : 0 : }
2676 : :
2677 : : /**
2678 : : * Translate rte_flow actions to DR action.
2679 : : *
2680 : : * @param[in] dev
2681 : : * Pointer to the rte_eth_dev structure.
2682 : : * @param[in] tbl
2683 : : * Pointer to the flow template table.
2684 : : * @param[out] error
2685 : : * Pointer to error structure.
2686 : : *
2687 : : * @return
2688 : : * 0 on success, negative value otherwise and rte_errno is set.
2689 : : */
2690 : : static int
2691 : 0 : flow_hw_actions_translate(struct rte_eth_dev *dev,
2692 : : struct rte_flow_template_table *tbl,
2693 : : struct rte_flow_error *error)
2694 : : {
2695 : : int ret;
2696 : : uint32_t i;
2697 : :
2698 [ # # ]: 0 : for (i = 0; i < tbl->nb_action_templates; i++) {
2699 [ # # ]: 0 : if (__flow_hw_actions_translate(dev, &tbl->cfg,
2700 : : &tbl->ats[i].acts,
2701 : : tbl->ats[i].action_template,
2702 : : &tbl->mpctx, error))
2703 : 0 : goto err;
2704 : 0 : flow_hw_populate_rule_acts_caches(dev, tbl, i);
2705 : : }
2706 [ # # ]: 0 : ret = mlx5_tbl_multi_pattern_process(dev, tbl, &tbl->mpctx.segments[0],
2707 : : rte_log2_u32(tbl->cfg.attr.nb_flows),
2708 : : error);
2709 [ # # ]: 0 : if (ret)
2710 : 0 : goto err;
2711 : : return 0;
2712 : : err:
2713 [ # # ]: 0 : while (i--)
2714 : 0 : __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
2715 : : return -1;
2716 : : }
2717 : :
2718 : : /**
2719 : : * Get shared indirect action.
2720 : : *
2721 : : * @param[in] dev
2722 : : * Pointer to the rte_eth_dev data structure.
2723 : : * @param[in] act_data
2724 : : * Pointer to the recorded action construct data.
2725 : : * @param[in] item_flags
2726 : : * The matcher itme_flags used for RSS lookup.
2727 : : * @param[in] rule_act
2728 : : * Pointer to the shared action's destination rule DR action.
2729 : : *
2730 : : * @return
2731 : : * 0 on success, negative value otherwise and rte_errno is set.
2732 : : */
2733 : : static __rte_always_inline int
2734 : : flow_hw_shared_action_get(struct rte_eth_dev *dev,
2735 : : struct mlx5_action_construct_data *act_data,
2736 : : const uint64_t item_flags,
2737 : : struct mlx5dr_rule_action *rule_act)
2738 : : {
2739 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2740 : 0 : struct mlx5_flow_rss_desc rss_desc = { 0 };
2741 : 0 : uint64_t hash_fields = 0;
2742 : : uint32_t hrxq_idx = 0;
2743 : : struct mlx5_hrxq *hrxq = NULL;
2744 : : int act_type = act_data->type;
2745 : :
2746 : : switch (act_type) {
2747 : : case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
2748 : 0 : rss_desc.level = act_data->shared_rss.level;
2749 : 0 : rss_desc.types = act_data->shared_rss.types;
2750 : 0 : rss_desc.symmetric_hash_function = act_data->shared_rss.symmetric_hash_function;
2751 : 0 : flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
2752 : 0 : hrxq_idx = flow_dv_action_rss_hrxq_lookup
2753 : : (dev, act_data->shared_rss.idx, hash_fields);
2754 [ # # # # : 0 : if (hrxq_idx)
# # # # #
# # # ]
2755 : 0 : hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
2756 : : hrxq_idx);
2757 [ # # # # : 0 : if (hrxq) {
# # # # #
# # # ]
2758 : 0 : rule_act->action = hrxq->action;
2759 : : return 0;
2760 : : }
2761 : : break;
2762 : : default:
2763 : : DRV_LOG(WARNING, "Unsupported shared action type:%d",
2764 : : act_data->type);
2765 : : break;
2766 : : }
2767 : : return -1;
2768 : : }
2769 : :
2770 : : static void
2771 : 0 : flow_hw_construct_quota(struct mlx5_priv *priv,
2772 : : struct mlx5dr_rule_action *rule_act, uint32_t qid)
2773 : : {
2774 : 0 : rule_act->action = priv->quota_ctx.dr_action;
2775 : 0 : rule_act->aso_meter.offset = qid - 1;
2776 : 0 : rule_act->aso_meter.init_color =
2777 : : MLX5DR_ACTION_ASO_METER_COLOR_GREEN;
2778 : 0 : }
2779 : :
2780 : : /**
2781 : : * Construct shared indirect action.
2782 : : *
2783 : : * @param[in] dev
2784 : : * Pointer to the rte_eth_dev data structure.
2785 : : * @param[in] queue
2786 : : * The flow creation queue index.
2787 : : * @param[in] action
2788 : : * Pointer to the shared indirect rte_flow action.
2789 : : * @param[in] table
2790 : : * Pointer to the flow table.
2791 : : * @param[in] it_idx
2792 : : * Item template index the action template refers to.
2793 : : * @param[in] action_flags
2794 : : * Actions bit-map detected in this template.
2795 : : * @param[in, out] flow
2796 : : * Pointer to the flow containing the counter.
2797 : : * @param[in] rule_act
2798 : : * Pointer to the shared action's destination rule DR action.
2799 : : *
2800 : : * @return
2801 : : * 0 on success, negative value otherwise and rte_errno is set.
2802 : : */
2803 : : static __rte_always_inline int
2804 : : flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
2805 : : const struct rte_flow_action *action,
2806 : : struct rte_flow_template_table *table,
2807 : : const uint8_t it_idx, uint64_t action_flags,
2808 : : struct rte_flow_hw *flow,
2809 : : struct mlx5dr_rule_action *rule_act)
2810 : : {
2811 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2812 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
2813 : : struct mlx5_action_construct_data act_data;
2814 : : struct mlx5_shared_action_rss *shared_rss;
2815 : : struct mlx5_aso_mtr *aso_mtr;
2816 : : struct mlx5_age_info *age_info;
2817 : : struct mlx5_hws_age_param *param;
2818 : : struct rte_flow_hw_aux *aux;
2819 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
2820 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
2821 : 0 : uint32_t idx = act_idx &
2822 : : ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
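 : : /*
 : :  * The indirect action handle encodes the action type in the bits at and
 : :  * above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the pool index in the bits
 : :  * below it, so the handle is decoded without dereferencing action->conf.
 : :  */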
2823 : : uint64_t item_flags;
2824 : : cnt_id_t age_cnt;
2825 : :
2826 : : memset(&act_data, 0, sizeof(act_data));
2827 [ # # # # : 0 : switch (type) {
# # # # #
# # # # #
# # # # #
# # ]
2828 : 0 : case MLX5_INDIRECT_ACTION_TYPE_RSS:
2829 : 0 : act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
2830 : 0 : shared_rss = mlx5_ipool_get
2831 : 0 : (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
2832 [ # # # # : 0 : if (!shared_rss)
# # ]
2833 : : return -1;
2834 : 0 : act_data.shared_rss.idx = idx;
2835 : 0 : act_data.shared_rss.level = shared_rss->origin.level;
2836 : 0 : act_data.shared_rss.types = !shared_rss->origin.types ?
2837 [ # # # # : 0 : RTE_ETH_RSS_IP :
# # ]
2838 : : shared_rss->origin.types;
2839 : 0 : act_data.shared_rss.symmetric_hash_function =
2840 : 0 : MLX5_RSS_IS_SYMM(shared_rss->origin.func);
2841 : :
2842 : 0 : item_flags = table->its[it_idx]->item_flags;
2843 : : if (flow_hw_shared_action_get
2844 : : (dev, &act_data, item_flags, rule_act))
2845 : : return -1;
2846 : : break;
2847 : 0 : case MLX5_INDIRECT_ACTION_TYPE_COUNT:
2848 : 0 : if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
2849 : : act_idx,
2850 : : &rule_act->action,
2851 : : &rule_act->counter.offset))
2852 : : return -1;
2853 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
2854 : 0 : flow->cnt_id = act_idx;
2855 : : break;
2856 : 0 : case MLX5_INDIRECT_ACTION_TYPE_AGE:
2857 : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
2858 : : /*
2859 : : * Save the index with the indirect type, to recognize
2860 : : * it in flow destroy.
2861 : : */
2862 : : mlx5_flow_hw_aux_set_age_idx(flow, aux, act_idx);
2863 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
2864 [ # # # # : 0 : if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
# # ]
2865 : : /*
2866 : : * The mutual update for indirect AGE & COUNT will be
2867 : : * performed later after we have ID for both of them.
2868 : : */
2869 : : break;
2870 : 0 : age_info = GET_PORT_AGE_INFO(priv);
2871 : 0 : param = mlx5_ipool_get(age_info->ages_ipool, idx);
2872 [ # # # # : 0 : if (param == NULL)
# # ]
2873 : : return -1;
2874 [ # # # # : 0 : if (action_flags & MLX5_FLOW_ACTION_COUNT) {
# # ]
2875 [ # # # # : 0 : if (mlx5_hws_cnt_pool_get(priv->hws_cpool,
# # # # #
# # # ]
2876 : : ¶m->queue_id, &age_cnt,
2877 : : idx) < 0)
2878 : : return -1;
2879 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
2880 : 0 : flow->cnt_id = age_cnt;
2881 : 0 : param->nb_cnts++;
2882 : : } else {
2883 : : /*
2884 : : * Get the counter of this indirect AGE or create one
2885 : : * if it doesn't exist.
2886 : : */
2887 : : age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
2888 [ # # # # : 0 : if (age_cnt == 0)
# # ]
2889 : : return -1;
2890 : : }
2891 : 0 : if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
2892 : : age_cnt, &rule_act->action,
2893 : : &rule_act->counter.offset))
2894 : : return -1;
2895 : : break;
2896 : : case MLX5_INDIRECT_ACTION_TYPE_CT:
2897 : : if (flow_hw_ct_compile(dev, queue, idx, rule_act))
2898 : : return -1;
2899 : : break;
2900 : 0 : case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
2901 : : /* Find ASO object. */
2902 : 0 : aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
2903 [ # # # # : 0 : if (!aso_mtr)
# # ]
2904 : : return -1;
2905 : 0 : rule_act->action = pool->action;
2906 : 0 : rule_act->aso_meter.offset = aso_mtr->offset;
2907 : : break;
2908 : 0 : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
2909 : 0 : flow_hw_construct_quota(priv, rule_act, idx);
2910 : : break;
2911 : 0 : default:
2912 : 0 : DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
2913 : : break;
2914 : : }
2915 : : return 0;
2916 : : }
2917 : :
2918 : : static __rte_always_inline int
2919 : : flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
2920 : : {
2921 : : struct mlx5_modification_cmd cmd_he = {
2922 : 0 : .data0 = rte_be_to_cpu_32(cmd->data0),
2923 : : .data1 = 0,
2924 : : };
2925 : :
2926 : 0 : return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
2927 : : }
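 : :
 : : /*
 : :  * Note: only data0 carries the modification action type, so the NOP
 : :  * check above byte-swaps data0 alone and leaves data1 zeroed.
 : :  */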
2928 : :
2929 : : /**
2930 : : * Construct flow action array.
2931 : : *
2932 : : * For action template contains dynamic actions, these actions need to
2933 : : * be updated according to the rte_flow action during flow creation.
2934 : : *
2935 : : * @param[in] dev
2936 : : * Pointer to the rte_eth_dev structure.
2937 : : * @param[in] job
2938 : : * Pointer to job descriptor.
2939 : : * @param[in] hw_acts
2940 : : * Pointer to translated actions from template.
2941 : : * @param[in] it_idx
2942 : : * Item template index the action template refer to.
2943 : : * @param[in] actions
2944 : : * Array of rte_flow action need to be checked.
2945 : : * @param[in] rule_acts
2946 : : * Array of DR rule actions to be used during flow creation..
2947 : : * @param[in] acts_num
2948 : : * Pointer to the real acts_num flow has.
2949 : : *
2950 : : * @return
2951 : : * 0 on success, negative value otherwise and rte_errno is set.
2952 : : */
2953 : : static __rte_always_inline int
2954 : : flow_hw_modify_field_construct(struct mlx5_modification_cmd *mhdr_cmd,
2955 : : struct mlx5_action_construct_data *act_data,
2956 : : const struct mlx5_hw_actions *hw_acts,
2957 : : const struct rte_flow_action *action)
2958 : : {
2959 : 0 : const struct rte_flow_action_modify_field *mhdr_action = action->conf;
2960 : 0 : uint8_t values[16] = { 0 };
2961 : : unaligned_uint32_t *value_p;
2962 : : uint32_t i;
2963 : : struct field_modify_info *field;
2964 : :
2965 [ # # # # : 0 : if (!hw_acts->mhdr)
# # # # #
# # # ]
2966 : : return -1;
2967 [ # # # # : 0 : if (hw_acts->mhdr->shared || act_data->modify_header.shared)
# # # # #
# # # # #
# # # # #
# # # #
# ]
2968 : : return 0;
2969 : : MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
2970 : : mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
2971 [ # # # # : 0 : if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
# # ]
2972 : : mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
2973 : : return 0;
2974 [ # # # # : 0 : if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
# # ]
2975 [ # # # # : 0 : rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
# # ]
2976 : : else
2977 [ # # # # : 0 : rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
# # ]
2978 [ # # # # : 0 : if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
# # ]
2979 [ # # # # : 0 : mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
# # ]
2980 [ # # # # : 0 : mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
# # ]
2981 : : mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
2982 : 0 : uint8_t tag_index = flow_tag_index_get(&mhdr_action->dst);
2983 : :
2984 : : value_p = (unaligned_uint32_t *)values;
2985 [ # # # # : 0 : if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
# # # # #
# # # ]
2986 : : tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
2987 [ # # # # : 0 : *value_p = rte_cpu_to_be_32(*value_p << 16);
# # ]
2988 : : else
2989 [ # # # # : 0 : *value_p = rte_cpu_to_be_32(*value_p);
# # ]
2990 [ # # # # : 0 : } else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
# # ]
2991 : : mhdr_action->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
2992 : : uint32_t tmp;
2993 : :
2994 : : /*
2995 : : * Both QFI and the Geneve option type are passed as uint8_t integers,
2996 : : * but each is accessed through the 2nd least significant byte of a
2997 : : * 32-bit field in the modify-header command.
2998 : : */
2999 : 0 : tmp = values[0];
3000 : : value_p = (unaligned_uint32_t *)values;
3001 [ # # # # : 0 : *value_p = rte_cpu_to_be_32(tmp << 8);
# # ]
3002 : : }
3003 : 0 : i = act_data->modify_header.mhdr_cmds_off;
3004 : 0 : field = act_data->modify_header.field;
3005 : : do {
3006 : : uint32_t off_b;
3007 : : uint32_t mask;
3008 : : uint32_t data;
3009 : : const uint8_t *mask_src;
3010 : :
3011 [ # # # # : 0 : if (i >= act_data->modify_header.mhdr_cmds_end)
# # # # #
# # # ]
3012 : : return -1;
3013 [ # # # # : 0 : if (flow_hw_mhdr_cmd_is_nop(&mhdr_cmd[i])) {
# # # # #
# # # # #
# # # # #
# # # #
# ]
3014 : 0 : ++i;
3015 : 0 : continue;
3016 : : }
3017 : 0 : mask_src = (const uint8_t *)act_data->modify_header.mask;
3018 : 0 : mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
3019 [ # # # # : 0 : if (!mask) {
# # # # #
# # # ]
3020 : 0 : ++field;
3021 : 0 : continue;
3022 : : }
3023 : 0 : off_b = rte_bsf32(mask);
3024 : 0 : data = flow_dv_fetch_field(values + field->offset, field->size);
3025 : : /*
3026 : : * IPv6 DSCP uses OUT_IPV6_TRAFFIC_CLASS as its field ID, but the
3027 : : * DSCP bits sit 2 bits to the left. Shift the data left for IPv6 DSCP.
3028 : : */
3029 [ # # # # : 0 : if (field->id == MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS &&
# # # # #
# # # ]
3030 : : mhdr_action->dst.field == RTE_FLOW_FIELD_IPV6_DSCP)
3031 : 0 : data <<= MLX5_IPV6_HDR_DSCP_SHIFT;
3032 : 0 : data = (data & mask) >> off_b;
3033 [ # # # # : 0 : mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
# # # # #
# # # ]
3034 : 0 : ++field;
3035 [ # # # # : 0 : } while (field->size);
# # # # #
# # # ]
3036 : : return 0;
3037 : : }
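/*
 * Editorial sketch (public rte_flow API, not this driver's code): a
 * MODIFY_FIELD action with an immediate value source, the case handled
 * by the constructor above. The 16-byte immediate is copied into the
 * pre-translated modify-header commands at flow creation. Struct and
 * field names follow rte_flow.h; the value bytes are hypothetical.
 */
static const struct rte_flow_action_modify_field set_meta_sketch = {
	.operation = RTE_FLOW_MODIFY_SET,
	.dst = {
		.field = RTE_FLOW_FIELD_META,
	},
	.src = {
		.field = RTE_FLOW_FIELD_VALUE,
		.value = { 0x78, 0x56, 0x34, 0x12 }, /* immediate bytes */
	},
	.width = 32,
};
static const struct rte_flow_action modify_actions_sketch[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
		.conf = &set_meta_sketch,
	},
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};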
3038 : :
3039 : : /**
3040 : : * Release any actions allocated for the flow rule during actions construction.
3041 : : *
3042 : : * @param[in] dev
3043 : : * Pointer to the rte_eth_dev structure.
3044 : : * @param[in] queue
3045 : : * The queue the flow was created on.
3046 : : * @param[in] flow
3047 : : * Pointer to flow structure.
3044 : : */
3045 : : static void
3046 : 0 : flow_hw_release_actions(struct rte_eth_dev *dev,
3047 : : uint32_t queue,
3048 : : struct rte_flow_hw *flow)
3049 : : {
3050 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3051 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3052 : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3053 : :
3054 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
3055 : 0 : flow_hw_jump_release(dev, flow->jump);
3056 [ # # ]: 0 : else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
3057 : 0 : mlx5_hrxq_obj_release(dev, flow->hrxq);
3058 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
3059 : 0 : flow_hw_age_count_release(priv, queue, flow, NULL);
3060 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
3061 : 0 : mlx5_ipool_free(pool->idx_pool, mlx5_flow_hw_aux_get_mtr_id(flow, aux));
3062 : 0 : }
3063 : :
3064 : : /**
3065 : : * Construct flow action array.
3066 : : *
3067 : : * For action templates that contain dynamic actions, these actions
3068 : : * need to be updated according to the rte_flow actions at flow creation.
3069 : : *
3070 : : * @param[in] dev
3071 : : * Pointer to the rte_eth_dev structure.
3072 : : * @param[in] flow
3073 : : * Pointer to flow structure.
3074 : : * @param[in] ap
3075 : : * Pointer to container for temporarily constructed actions' parameters.
3076 : : * @param[in] hw_at
3077 : : * Pointer to the translated action template.
3078 : : * @param[in] it_idx
3079 : : * Item template index the action template refers to.
3080 : : * @param[in] actions
3081 : : * Array of rte_flow actions to be checked.
3082 : : * @param[in] rule_acts
3083 : : * Array of DR rule actions to be used during flow creation.
3084 : : * @param[in] queue
3085 : : * The queue the flow rule is created on.
3086 : : *
3087 : : * @return
3088 : : * 0 on success, negative value otherwise and rte_errno is set.
3089 : : */
3090 : : static __rte_always_inline int
3091 : : flow_hw_actions_construct(struct rte_eth_dev *dev,
3092 : : struct rte_flow_hw *flow,
3093 : : struct mlx5_flow_hw_action_params *ap,
3094 : : const struct mlx5_hw_action_template *hw_at,
3095 : : const uint8_t it_idx,
3096 : : const struct rte_flow_action actions[],
3097 : : struct mlx5dr_rule_action *rule_acts,
3098 : : uint32_t queue,
3099 : : struct rte_flow_error *error)
3100 : : {
3101 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3102 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3103 : 0 : struct rte_flow_template_table *table = flow->table;
3104 : : struct mlx5_action_construct_data *act_data;
3105 : 0 : const struct rte_flow_actions_template *at = hw_at->action_template;
3106 : : const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
3107 : : const struct rte_flow_action *action;
3108 : : const struct rte_flow_action_raw_encap *raw_encap_data;
3109 : : const struct rte_flow_action_ipv6_ext_push *ipv6_push;
3110 : : const struct rte_flow_item *enc_item = NULL;
3111 : : const struct rte_flow_action_ethdev *port_action = NULL;
3112 : : const struct rte_flow_action_meter *meter = NULL;
3113 : : const struct rte_flow_action_age *age = NULL;
3114 : : const struct rte_flow_action_nat64 *nat64_c = NULL;
3115 : 0 : struct rte_flow_attr attr = {
3116 : : .ingress = 1,
3117 : : };
3118 : : uint32_t ft_flag;
3119 : : int ret;
3120 : 0 : size_t encap_len = 0;
3121 : : uint32_t age_idx = 0;
3122 : : uint32_t mtr_idx = 0;
3123 : : struct mlx5_aso_mtr *aso_mtr;
3124 : : struct mlx5_multi_pattern_segment *mp_segment = NULL;
3125 : : struct rte_flow_hw_aux *aux;
3126 : :
3127 : 0 : attr.group = table->grp->group_id;
3128 : 0 : ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
3129 : 0 : if (table->type == MLX5DR_TABLE_TYPE_FDB) {
3130 : 0 : attr.transfer = 1;
3131 : : attr.ingress = 1;
3132 [ # # # # : 0 : } else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
# # ]
3133 : 0 : attr.egress = 1;
3134 : 0 : attr.ingress = 0;
3135 : : } else {
3136 : : attr.ingress = 1;
3137 : : }
3138 [ # # # # : 0 : if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0 && !hw_acts->mhdr->shared) {
# # # # #
# # # # #
# # # # ]
3139 : 0 : uint16_t pos = hw_acts->mhdr->pos;
3140 : :
3141 : 0 : mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3142 [ # # # # : 0 : if (!mp_segment || !mp_segment->mhdr_action)
# # # # #
# # # ]
3143 : : return -1;
3144 : 0 : rule_acts[pos].action = mp_segment->mhdr_action;
3145 : : /* offset is relative to DR action */
3146 : 0 : rule_acts[pos].modify_header.offset =
3147 : 0 : flow->res_idx - mp_segment->head_index;
3148 : 0 : rule_acts[pos].modify_header.data =
3149 : : (uint8_t *)ap->mhdr_cmd;
3150 : 0 : rte_memcpy(ap->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
3151 [ # # # # : 0 : sizeof(*ap->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
# # ]
3152 : : }
3153 [ # # # # : 0 : LIST_FOREACH(act_data, &hw_acts->act_list, next) {
# # ]
3154 : : uint32_t jump_group;
3155 : : uint32_t tag;
3156 : : uint64_t item_flags;
3157 : : struct mlx5_hw_jump_action *jump;
3158 : : struct mlx5_hrxq *hrxq;
3159 : : uint32_t ct_idx;
3160 : : cnt_id_t cnt_id;
3161 : : uint32_t *cnt_queue;
3162 : : uint32_t mtr_id;
3163 : :
3164 : 0 : action = &actions[act_data->action_src];
3165 : : /*
3166 : : * action template construction replaces
3167 : : * OF_SET_VLAN_VID with MODIFY_FIELD
3168 : : */
3169 : 0 : if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3170 : : MLX5_ASSERT(act_data->type ==
3171 : : RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
3172 : : else
3173 : : MLX5_ASSERT(action->type ==
3174 : : RTE_FLOW_ACTION_TYPE_INDIRECT ||
3175 : : (int)action->type == act_data->type);
3176 [ # # # # : 0 : switch ((int)act_data->type) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # #
# ]
3177 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
3178 : 0 : act_data->indirect_list_cb(dev, act_data, actions,
3179 : 0 : &rule_acts[act_data->action_dst]);
3180 : 0 : break;
3181 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
3182 : 0 : if (flow_hw_shared_action_construct
3183 : : (dev, queue, action, table, it_idx,
3184 : 0 : at->action_flags, flow,
3185 [ # # # # : 0 : &rule_acts[act_data->action_dst]))
# # # # #
# # # # #
# # # # #
# # ]
3186 : 0 : goto error;
3187 : : break;
3188 : : case RTE_FLOW_ACTION_TYPE_VOID:
3189 : : break;
3190 : 0 : case RTE_FLOW_ACTION_TYPE_MARK:
3191 : 0 : tag = mlx5_flow_mark_set
3192 : : (((const struct rte_flow_action_mark *)
3193 : 0 : (action->conf))->id);
3194 : 0 : rule_acts[act_data->action_dst].tag.value = tag;
3195 : 0 : break;
3196 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3197 : 0 : rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
3198 : 0 : vlan_hdr_to_be32(action);
3199 : 0 : break;
3200 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
3201 : 0 : jump_group = ((const struct rte_flow_action_jump *)
3202 : 0 : action->conf)->group;
3203 : 0 : jump = flow_hw_jump_action_register
3204 : 0 : (dev, &table->cfg, jump_group, NULL);
3205 [ # # # # : 0 : if (!jump)
# # ]
3206 : 0 : goto error;
3207 : 0 : rule_acts[act_data->action_dst].action =
3208 [ # # # # : 0 : (!!attr.group) ? jump->hws_action : jump->root_action;
# # ]
3209 : 0 : flow->jump = jump;
3210 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3211 : 0 : break;
3212 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
3213 : : case RTE_FLOW_ACTION_TYPE_QUEUE:
3214 : 0 : hrxq = flow_hw_tir_action_register(dev,
3215 : : ft_flag,
3216 : : action);
3217 [ # # # # : 0 : if (!hrxq)
# # ]
3218 : 0 : goto error;
3219 : 0 : rule_acts[act_data->action_dst].action = hrxq->action;
3220 : 0 : flow->hrxq = hrxq;
3221 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ;
3222 : 0 : break;
3223 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3224 : 0 : item_flags = table->its[it_idx]->item_flags;
3225 : 0 : if (flow_hw_shared_action_get
3226 : : (dev, act_data, item_flags,
3227 : 0 : &rule_acts[act_data->action_dst]))
3228 : 0 : goto error;
3229 : : break;
3230 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3231 : 0 : enc_item = ((const struct rte_flow_action_vxlan_encap *)
3232 : 0 : action->conf)->definition;
3233 [ # # # # : 0 : if (flow_dv_convert_encap_data(enc_item, ap->encap_data, &encap_len, NULL))
# # ]
3234 : 0 : goto error;
3235 : : break;
3236 : 0 : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3237 : 0 : enc_item = ((const struct rte_flow_action_nvgre_encap *)
3238 : 0 : action->conf)->definition;
3239 [ # # # # : 0 : if (flow_dv_convert_encap_data(enc_item, ap->encap_data, &encap_len, NULL))
# # ]
3240 : 0 : goto error;
3241 : : break;
3242 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3243 : 0 : raw_encap_data =
3244 : : (const struct rte_flow_action_raw_encap *)
3245 : : action->conf;
3246 : : MLX5_ASSERT(raw_encap_data->size == act_data->encap.len);
3247 [ # # # # : 0 : if (unlikely(act_data->encap.len > MLX5_ENCAP_MAX_LEN))
# # ]
3248 : : return -1;
3249 [ # # # # : 0 : rte_memcpy(ap->encap_data, raw_encap_data->data, act_data->encap.len);
# # ]
3250 : : break;
3251 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
3252 : 0 : ipv6_push =
3253 : : (const struct rte_flow_action_ipv6_ext_push *)action->conf;
3254 : : MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
3255 [ # # # # : 0 : if (unlikely(act_data->ipv6_ext.len > MLX5_PUSH_MAX_LEN))
# # ]
3256 : : return -1;
3257 [ # # # # : 0 : rte_memcpy(ap->ipv6_push_data, ipv6_push->data,
# # ]
3258 : : act_data->ipv6_ext.len);
3259 : : break;
3260 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3261 [ # # # # : 0 : if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
# # ]
3262 : : ret = flow_hw_set_vlan_vid_construct(dev, ap->mhdr_cmd,
3263 : : act_data,
3264 : : hw_acts,
3265 : : action);
3266 : : else
3267 : : ret = flow_hw_modify_field_construct(ap->mhdr_cmd,
3268 : : act_data,
3269 : : hw_acts,
3270 : : action);
3271 [ # # # # : 0 : if (ret)
# # ]
3272 : 0 : goto error;
3273 : : break;
3274 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3275 : 0 : port_action = action->conf;
3276 [ # # # # : 0 : if (!priv->hw_vport[port_action->port_id])
# # ]
3277 : 0 : goto error;
3278 : 0 : rule_acts[act_data->action_dst].action =
3279 : : priv->hw_vport[port_action->port_id];
3280 : 0 : break;
3281 : 0 : case RTE_FLOW_ACTION_TYPE_QUOTA:
3282 : 0 : flow_hw_construct_quota(priv,
3283 : 0 : rule_acts + act_data->action_dst,
3284 : : act_data->shared_meter.id);
3285 : 0 : break;
3286 : 0 : case RTE_FLOW_ACTION_TYPE_METER:
3287 : 0 : meter = action->conf;
3288 : 0 : mtr_id = meter->mtr_id;
3289 : 0 : aso_mtr = mlx5_aso_meter_by_idx(priv, mtr_id);
3290 : 0 : rule_acts[act_data->action_dst].action =
3291 : 0 : priv->mtr_bulk.action;
3292 : 0 : rule_acts[act_data->action_dst].aso_meter.offset =
3293 : 0 : aso_mtr->offset;
3294 : 0 : jump = flow_hw_jump_action_register
3295 : 0 : (dev, &table->cfg, aso_mtr->fm.group, NULL);
3296 [ # # # # : 0 : if (!jump)
# # ]
3297 : 0 : goto error;
3298 : : MLX5_ASSERT
3299 : : (!rule_acts[act_data->action_dst + 1].action);
3300 : 0 : rule_acts[act_data->action_dst + 1].action =
3301 [ # # # # : 0 : (!!attr.group) ? jump->hws_action :
# # ]
3302 : : jump->root_action;
3303 : 0 : flow->jump = jump;
3304 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3305 [ # # # # : 0 : if (mlx5_aso_mtr_wait(priv, aso_mtr, true))
# # ]
3306 : 0 : goto error;
3307 : : break;
3308 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
3309 : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3310 : 0 : age = action->conf;
3311 : : /*
3312 : : * First, create the AGE parameter, then create its
3313 : : * counter later:
3314 : : * Regular counter - in next case.
3315 : : * Indirect counter - update it after the loop.
3316 : : */
3317 : 0 : age_idx = mlx5_hws_age_action_create(priv, queue, 0,
3318 : : age,
3319 : : flow->res_idx,
3320 : : error);
3321 [ # # # # : 0 : if (age_idx == 0)
# # ]
3322 : 0 : goto error;
3323 : : mlx5_flow_hw_aux_set_age_idx(flow, aux, age_idx);
3324 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
3325 [ # # # # : 0 : if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
# # ]
3326 : : /*
3327 : : * When AGE uses an indirect counter, there is no
3328 : : * need to create a counter here; it is updated with
3329 : : * the AGE parameter after the loop.
3330 : : */
3331 : : break;
3332 : : /* Fall-through. */
3333 : : case RTE_FLOW_ACTION_TYPE_COUNT:
3334 : : cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
3335 : : ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);
3336 [ # # # # : 0 : if (ret != 0)
# # ]
3337 : 0 : goto error;
3338 : 0 : ret = mlx5_hws_cnt_pool_get_action_offset
3339 : : (priv->hws_cpool,
3340 : : cnt_id,
3341 : : &rule_acts[act_data->action_dst].action,
3342 : 0 : &rule_acts[act_data->action_dst].counter.offset
3343 : : );
3344 : : if (ret != 0)
3345 : : goto error;
3346 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3347 : 0 : flow->cnt_id = cnt_id;
3348 : 0 : break;
3349 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
3350 : 0 : ret = mlx5_hws_cnt_pool_get_action_offset
3351 : : (priv->hws_cpool,
3352 : : act_data->shared_counter.id,
3353 : : &rule_acts[act_data->action_dst].action,
3354 : 0 : &rule_acts[act_data->action_dst].counter.offset
3355 : : );
3356 : : if (ret != 0)
3357 : : goto error;
3358 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3359 : 0 : flow->cnt_id = act_data->shared_counter.id;
3360 : 0 : break;
3361 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3362 : 0 : ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(action->conf);
3363 : 0 : if (flow_hw_ct_compile(dev, queue, ct_idx,
3364 : 0 : &rule_acts[act_data->action_dst]))
3365 : 0 : goto error;
3366 : : break;
3367 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
3368 : 0 : mtr_id = act_data->shared_meter.id &
3369 : : ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3370 : : /* Find ASO object. */
3371 : 0 : aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
3372 [ # # # # : 0 : if (!aso_mtr)
# # ]
3373 : 0 : goto error;
3374 : 0 : rule_acts[act_data->action_dst].action =
3375 : 0 : pool->action;
3376 : 0 : rule_acts[act_data->action_dst].aso_meter.offset =
3377 : 0 : aso_mtr->offset;
3378 : 0 : break;
3379 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
3380 : : /*
3381 : : * Allocating the meter directly would slow down
3382 : : * the flow insertion rate.
3383 : : */
3384 : : ret = flow_hw_meter_mark_compile(dev,
3385 : 0 : act_data->action_dst, action,
3386 : : rule_acts, &mtr_idx, MLX5_HW_INV_QUEUE, error);
3387 : : if (ret != 0)
3388 : 0 : goto error;
3389 : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3390 : : mlx5_flow_hw_aux_set_mtr_id(flow, aux, mtr_idx);
3391 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MTR_ID;
3392 : 0 : break;
3393 : 0 : case RTE_FLOW_ACTION_TYPE_NAT64:
3394 : 0 : nat64_c = action->conf;
3395 : 0 : rule_acts[act_data->action_dst].action =
3396 : 0 : priv->action_nat64[table->type][nat64_c->type];
3397 : 0 : break;
3398 : : default:
3399 : : break;
3400 : : }
3401 : : }
3402 [ # # # # : 0 : if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
# # ]
3403 : : /* If indirect count is used, then CNT_ID flag should be set. */
3404 : : MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
3405 [ # # # # : 0 : if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
# # ]
3406 : : /* If indirect AGE is used, then AGE_IDX flag should be set. */
3407 : : MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX);
3408 : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3409 : 0 : age_idx = mlx5_flow_hw_aux_get_age_idx(flow, aux) &
3410 : : MLX5_HWS_AGE_IDX_MASK;
3411 [ # # # # : 0 : if (mlx5_hws_cnt_age_get(priv->hws_cpool, flow->cnt_id) != age_idx)
# # # # #
# # # ]
3412 : : /*
3413 : : * This is the first use of this indirect counter
3414 : : * for this indirect AGE; increase the number
3415 : : * of counters accordingly.
3416 : : */
3417 : : mlx5_hws_age_nb_cnt_increase(priv, age_idx);
3418 : : }
3419 : : /*
3420 : : * Update this indirect counter with the indirect/direct AGE
3421 : : * that is using it.
3422 : : */
3423 [ # # # # : 0 : mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, age_idx);
# # ]
3424 : : }
3425 [ # # # # : 0 : if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
# # # # #
# # # ]
3426 [ # # # # : 0 : int ix = mlx5_multi_pattern_reformat_to_index(hw_acts->encap_decap->action_type);
# # ]
3427 : 0 : struct mlx5dr_rule_action *ra = &rule_acts[hw_acts->encap_decap_pos];
3428 : :
3429 [ # # # # : 0 : if (ix < 0)
# # ]
3430 : 0 : goto error;
3431 [ # # # # : 0 : if (!mp_segment)
# # ]
3432 : 0 : mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3433 [ # # # # : 0 : if (!mp_segment || !mp_segment->reformat_action[ix])
# # # # #
# # # ]
3434 : 0 : goto error;
3435 : 0 : ra->action = mp_segment->reformat_action[ix];
3436 : : /* reformat offset is relative to selected DR action */
3437 : 0 : ra->reformat.offset = flow->res_idx - mp_segment->head_index;
3438 : 0 : ra->reformat.data = ap->encap_data;
3439 : : }
3440 [ # # # # : 0 : if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
# # # # #
# # # ]
3441 : 0 : rule_acts[hw_acts->push_remove_pos].ipv6_ext.offset =
3442 : 0 : flow->res_idx - 1;
3443 : 0 : rule_acts[hw_acts->push_remove_pos].ipv6_ext.header = ap->ipv6_push_data;
3444 : : }
3445 [ # # # # : 0 : if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) {
# # ]
3446 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3447 : 0 : flow->cnt_id = hw_acts->cnt_id;
3448 : : }
3449 : : return 0;
3450 : :
3451 : 0 : error:
3452 : 0 : flow_hw_release_actions(dev, queue, flow);
3453 : 0 : rte_errno = EINVAL;
3454 : : return -rte_errno;
3455 : : }
3456 : :
3457 : : static const struct rte_flow_item *
3458 : 0 : flow_hw_get_rule_items(struct rte_eth_dev *dev,
3459 : : const struct rte_flow_template_table *table,
3460 : : const struct rte_flow_item items[],
3461 : : uint8_t pattern_template_index,
3462 : : struct mlx5_flow_hw_pattern_params *pp)
3463 : : {
3464 : 0 : struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
3465 : :
3466 : : /* Only one implicit item can be added to flow rule pattern. */
3467 : : MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
3468 : : /* At least one item was allocated in pattern params for items. */
3469 : : MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
3470 [ # # ]: 0 : if (pt->implicit_port) {
3471 [ # # ]: 0 : if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3472 : 0 : rte_errno = ENOMEM;
3473 : 0 : return NULL;
3474 : : }
3475 : : /* Set up represented port item in pattern params. */
3476 : 0 : pp->port_spec = (struct rte_flow_item_ethdev){
3477 : 0 : .port_id = dev->data->port_id,
3478 : : };
3479 : 0 : pp->items[0] = (struct rte_flow_item){
3480 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
3481 : 0 : .spec = &pp->port_spec,
3482 : : };
3483 [ # # ]: 0 : rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
3484 : 0 : return pp->items;
3485 [ # # ]: 0 : } else if (pt->implicit_tag) {
3486 [ # # ]: 0 : if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
3487 : 0 : rte_errno = ENOMEM;
3488 : 0 : return NULL;
3489 : : }
3490 : : /* Set up tag item in pattern params. */
3491 : 0 : pp->tag_spec = (struct rte_flow_item_tag){
3492 : : .data = flow_hw_tx_tag_regc_value(dev),
3493 : : };
3494 : 0 : pp->items[0] = (struct rte_flow_item){
3495 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3496 : 0 : .spec = &pp->tag_spec,
3497 : : };
3498 : 0 : rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
3499 : 0 : return pp->items;
3500 : : } else {
3501 : : return items;
3502 : : }
3503 : : }
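/*
 * Editorial illustration (hypothetical pattern): with an implicit
 * REPRESENTED_PORT item the user's items are shifted right by one and
 * the PMD-owned port item is put first, i.e. a user pattern of
 * { ETH, IPV4, END } becomes the equivalent of:
 */
static const struct rte_flow_item_ethdev port_spec_sketch = {
	.port_id = 0, /* hypothetical proxy port id */
};
static const struct rte_flow_item rule_items_sketch[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
		.spec = &port_spec_sketch,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};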
3504 : :
3505 : : /**
3506 : : * Enqueue HW steering flow creation.
3507 : : *
3508 : : * The flow will be applied to the HW only if the postpone bit is not set or
3509 : : * the extra push function is called.
3510 : : * The flow creation status should be checked from dequeue result.
3511 : : *
3512 : : * @param[in] dev
3513 : : * Pointer to the rte_eth_dev structure.
3514 : : * @param[in] queue
3515 : : * The queue to create the flow.
3516 : : * @param[in] attr
3517 : : * Pointer to the flow operation attributes.
3518 : : * @param[in] items
3519 : : * Items with flow spec value.
3520 : : * @param[in] pattern_template_index
3521 : : * The pattern template index in the table the flow follows.
3522 : : * @param[in] actions
3523 : : * Action with flow spec value.
3524 : : * @param[in] action_template_index
3525 : : * The action template index in the table the flow follows.
3526 : : * @param[in] user_data
3527 : : * Pointer to the user_data.
3528 : : * @param[out] error
3529 : : * Pointer to error structure.
3530 : : *
3531 : : * @return
3532 : : * Flow pointer on success, NULL otherwise and rte_errno is set.
3533 : : */
3534 : : static struct rte_flow *
3535 : 0 : flow_hw_async_flow_create(struct rte_eth_dev *dev,
3536 : : uint32_t queue,
3537 : : const struct rte_flow_op_attr *attr,
3538 : : struct rte_flow_template_table *table,
3539 : : const struct rte_flow_item items[],
3540 : : uint8_t pattern_template_index,
3541 : : const struct rte_flow_action actions[],
3542 : : uint8_t action_template_index,
3543 : : void *user_data,
3544 : : struct rte_flow_error *error)
3545 : : {
3546 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3547 : 0 : struct mlx5dr_rule_attr rule_attr = {
3548 : : .queue_id = queue,
3549 : : .user_data = user_data,
3550 : 0 : .burst = attr->postpone,
3551 : : };
3552 : : struct mlx5dr_rule_action *rule_acts;
3553 : : struct mlx5_flow_hw_action_params ap;
3554 : : struct mlx5_flow_hw_pattern_params pp;
3555 : : struct rte_flow_hw *flow = NULL;
3556 : : const struct rte_flow_item *rule_items;
3557 : 0 : uint32_t flow_idx = 0;
3558 : 0 : uint32_t res_idx = 0;
3559 : : int ret;
3560 : :
3561 : 0 : flow = mlx5_ipool_malloc(table->flow, &flow_idx);
3562 [ # # ]: 0 : if (!flow)
3563 : 0 : goto error;
3564 : 0 : rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
3565 : : /*
3566 : : * Set the table here in order to know the destination table
3567 : : * when freeing the flow afterwards.
3568 : : */
3569 : 0 : flow->table = table;
3570 : 0 : flow->mt_idx = pattern_template_index;
3571 : 0 : flow->idx = flow_idx;
3572 [ # # ]: 0 : if (table->resource) {
3573 : 0 : mlx5_ipool_malloc(table->resource, &res_idx);
3574 [ # # ]: 0 : if (!res_idx)
3575 : 0 : goto error;
3576 : 0 : flow->res_idx = res_idx;
3577 : : } else {
3578 : 0 : flow->res_idx = flow_idx;
3579 : : }
3580 : 0 : flow->flags = 0;
3581 : : /*
3582 : : * Set the flow operation type here in order to know if the flow memory
3583 : : * should be freed or not when getting the result from dequeue.
3584 : : */
3585 : 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
3586 : 0 : flow->user_data = user_data;
3587 : 0 : rule_attr.user_data = flow;
3588 : : /*
3589 : : * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
3590 : : * for rule insertion hints.
3591 : : */
3592 : 0 : flow->rule_idx = flow->res_idx - 1;
3593 : 0 : rule_attr.rule_idx = flow->rule_idx;
3594 : : /*
3595 : : * Construct the flow actions based on the input actions.
3596 : : * The implicitly appended action is always fixed, like metadata
3597 : : * copy action from FDB to NIC Rx.
3598 : : * No need to copy and construct a new "actions" list based on the
3599 : : * user's input, in order to save the cost.
3600 : : */
3601 : 0 : if (flow_hw_actions_construct(dev, flow, &ap,
3602 [ # # ]: 0 : &table->ats[action_template_index],
3603 : : pattern_template_index, actions,
3604 : : rule_acts, queue, error))
3605 : 0 : goto error;
3606 : 0 : rule_items = flow_hw_get_rule_items(dev, table, items,
3607 : : pattern_template_index, &pp);
3608 [ # # ]: 0 : if (!rule_items)
3609 : 0 : goto error;
3610 [ # # ]: 0 : if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
3611 : 0 : ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
3612 : : pattern_template_index, rule_items,
3613 : : action_template_index, rule_acts,
3614 : : &rule_attr,
3615 : 0 : (struct mlx5dr_rule *)flow->rule);
3616 : : } else {
3617 : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3618 : : uint32_t selector;
3619 : :
3620 : 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;
3621 : 0 : rte_rwlock_read_lock(&table->matcher_replace_rwlk);
3622 : 0 : selector = table->matcher_selector;
3623 : 0 : ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
3624 : : pattern_template_index, rule_items,
3625 : : action_template_index, rule_acts,
3626 : : &rule_attr,
3627 : 0 : (struct mlx5dr_rule *)flow->rule);
3628 : 0 : rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
3629 : 0 : aux->matcher_selector = selector;
3630 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR;
3631 : : }
3632 [ # # ]: 0 : if (likely(!ret)) {
3633 : 0 : flow_hw_q_inc_flow_ops(priv, queue);
3634 : 0 : return (struct rte_flow *)flow;
3635 : : }
3636 : 0 : error:
3637 [ # # # # ]: 0 : if (table->resource && res_idx)
3638 : 0 : mlx5_ipool_free(table->resource, res_idx);
3639 [ # # ]: 0 : if (flow_idx)
3640 : 0 : mlx5_ipool_free(table->flow, flow_idx);
3641 : 0 : rte_flow_error_set(error, rte_errno,
3642 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3643 : : "fail to create rte flow");
3644 : 0 : return NULL;
3645 : : }
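/*
 * Editorial sketch of the public async API this function backs,
 * assuming port_id, queue_id, table, pattern and actions were prepared
 * with the template API beforehand (error handling abbreviated). With
 * .postpone = 1 the rule is only enqueued; rte_flow_push() drains the
 * queue to HW and rte_flow_pull() retrieves the completion status.
 */
static void
async_create_usage_sketch(uint16_t port_id, uint32_t queue_id,
			  struct rte_flow_template_table *table,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[])
{
	struct rte_flow_op_attr op_attr = { .postpone = 1 };
	struct rte_flow_op_result res[BURST_THR];
	struct rte_flow_error err;
	struct rte_flow *f;
	int n;

	f = rte_flow_async_create(port_id, queue_id, &op_attr, table,
				  pattern, 0 /* pattern template index */,
				  actions, 0 /* actions template index */,
				  NULL /* user_data */, &err);
	if (f == NULL)
		return;
	rte_flow_push(port_id, queue_id, &err);
	do {
		n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res), &err);
	} while (n == 0);
}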
3646 : :
3647 : : /**
3648 : : * Enqueue HW steering flow creation by index.
3649 : : *
3650 : : * The flow will be applied to the HW only if the postpone bit is not set or
3651 : : * the extra push function is called.
3652 : : * The flow creation status should be checked from dequeue result.
3653 : : *
3654 : : * @param[in] dev
3655 : : * Pointer to the rte_eth_dev structure.
3656 : : * @param[in] queue
3657 : : * The queue to create the flow.
3658 : : * @param[in] attr
3659 : : * Pointer to the flow operation attributes.
3660 : : * @param[in] rule_index
3661 : : * The rule index in the table at which the flow is inserted.
3662 : : * @param[in] actions
3663 : : * Action with flow spec value.
3664 : : * @param[in] action_template_index
3665 : : * The action template index in the table the flow follows.
3666 : : * @param[in] user_data
3667 : : * Pointer to the user_data.
3668 : : * @param[out] error
3669 : : * Pointer to error structure.
3670 : : *
3671 : : * @return
3672 : : * Flow pointer on success, NULL otherwise and rte_errno is set.
3673 : : */
3674 : : static struct rte_flow *
3675 : 0 : flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
3676 : : uint32_t queue,
3677 : : const struct rte_flow_op_attr *attr,
3678 : : struct rte_flow_template_table *table,
3679 : : uint32_t rule_index,
3680 : : const struct rte_flow_action actions[],
3681 : : uint8_t action_template_index,
3682 : : void *user_data,
3683 : : struct rte_flow_error *error)
3684 : : {
3685 : 0 : struct rte_flow_item items[] = {{.type = RTE_FLOW_ITEM_TYPE_END,}};
3686 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3687 : 0 : struct mlx5dr_rule_attr rule_attr = {
3688 : : .queue_id = queue,
3689 : : .user_data = user_data,
3690 : 0 : .burst = attr->postpone,
3691 : : };
3692 : : struct mlx5dr_rule_action *rule_acts;
3693 : : struct mlx5_flow_hw_action_params ap;
3694 : : struct rte_flow_hw *flow = NULL;
3695 : 0 : uint32_t flow_idx = 0;
3696 : 0 : uint32_t res_idx = 0;
3697 : : int ret;
3698 : :
3699 [ # # ]: 0 : if (unlikely(rule_index >= table->cfg.attr.nb_flows)) {
3700 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3701 : : "Flow rule index exceeds table size");
3702 : 0 : return NULL;
3703 : : }
3704 : 0 : flow = mlx5_ipool_malloc(table->flow, &flow_idx);
3705 [ # # ]: 0 : if (!flow)
3706 : 0 : goto error;
3707 : 0 : rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
3708 : : /*
3709 : : * Set the table here in order to know the destination table
3710 : : * when freeing the flow afterwards.
3711 : : */
3712 : 0 : flow->table = table;
3713 : 0 : flow->mt_idx = 0;
3714 : 0 : flow->idx = flow_idx;
3715 [ # # ]: 0 : if (table->resource) {
3716 : 0 : mlx5_ipool_malloc(table->resource, &res_idx);
3717 [ # # ]: 0 : if (!res_idx)
3718 : 0 : goto error;
3719 : 0 : flow->res_idx = res_idx;
3720 : : } else {
3721 : 0 : flow->res_idx = flow_idx;
3722 : : }
3723 : 0 : flow->flags = 0;
3724 : : /*
3725 : : * Set the flow operation type here in order to know if the flow memory
3726 : : * should be freed or not when getting the result from dequeue.
3727 : : */
3728 : 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
3729 : 0 : flow->user_data = user_data;
3730 : 0 : rule_attr.user_data = flow;
3731 : : /* Set the rule index. */
3732 : 0 : flow->rule_idx = rule_index;
3733 : 0 : rule_attr.rule_idx = flow->rule_idx;
3734 : : /*
3735 : : * Construct the flow actions based on the input actions.
3736 : : * The implicitly appended action is always fixed, like metadata
3737 : : * copy action from FDB to NIC Rx.
3738 : : * No need to copy and construct a new "actions" list based on the
3739 : : * user's input, in order to save the cost.
3740 : : */
3741 : 0 : if (flow_hw_actions_construct(dev, flow, &ap,
3742 [ # # ]: 0 : &table->ats[action_template_index],
3743 : : 0, actions, rule_acts, queue, error)) {
3744 : 0 : rte_errno = EINVAL;
3745 : 0 : goto error;
3746 : : }
3747 [ # # ]: 0 : if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
3748 : 0 : ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
3749 : : 0, items, action_template_index,
3750 : : rule_acts, &rule_attr,
3751 : 0 : (struct mlx5dr_rule *)flow->rule);
3752 : : } else {
3753 : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3754 : : uint32_t selector;
3755 : :
3756 : 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;
3757 : 0 : rte_rwlock_read_lock(&table->matcher_replace_rwlk);
3758 : 0 : selector = table->matcher_selector;
3759 : 0 : ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
3760 : : 0, items, action_template_index,
3761 : : rule_acts, &rule_attr,
3762 : 0 : (struct mlx5dr_rule *)flow->rule);
3763 : 0 : rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
3764 : 0 : aux->matcher_selector = selector;
3765 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR;
3766 : : }
3767 [ # # ]: 0 : if (likely(!ret)) {
3768 : 0 : flow_hw_q_inc_flow_ops(priv, queue);
3769 : 0 : return (struct rte_flow *)flow;
3770 : : }
3771 : 0 : error:
3772 [ # # # # ]: 0 : if (table->resource && res_idx)
3773 : 0 : mlx5_ipool_free(table->resource, res_idx);
3774 [ # # ]: 0 : if (flow_idx)
3775 : 0 : mlx5_ipool_free(table->flow, flow_idx);
3776 : 0 : rte_flow_error_set(error, rte_errno,
3777 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3778 : : "fail to create rte flow");
3779 : 0 : return NULL;
3780 : : }
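/*
 * Editorial sketch: insertion by index requires a template table
 * created with index-based insertion; the rule then carries no match
 * pattern, only actions. Names follow the public rte_flow API and the
 * values are hypothetical.
 */
static const struct rte_flow_template_table_attr index_tbl_attr_sketch = {
	.insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_INDEX,
	.nb_flows = 1024, /* rule_index must stay below this */
	/* flow_attr and the remaining fields omitted for brevity */
};
/*
 * ... then, with such a table:
 * f = rte_flow_async_create_by_index(port_id, queue_id, &op_attr,
 *                                    table, 42, actions, 0, NULL, &err);
 */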
3781 : :
3782 : : /**
3783 : : * Enqueue HW steering flow update.
3784 : : *
3785 : : * The flow will be applied to the HW only if the postpone bit is not set or
3786 : : * the extra push function is called.
3787 : : * The flow update status should be checked from the dequeue result.
3788 : : *
3789 : : * @param[in] dev
3790 : : * Pointer to the rte_eth_dev structure.
3791 : : * @param[in] queue
3792 : : * The queue to update the flow.
3793 : : * @param[in] attr
3794 : : * Pointer to the flow operation attributes.
3795 : : * @param[in] flow
3796 : : * Pointer to the flow to be updated.
3797 : : * @param[in] actions
3798 : : * Action with flow spec value.
3799 : : * @param[in] action_template_index
3800 : : * The action template index in the table the flow follows.
3801 : : * @param[in] user_data
3802 : : * Pointer to the user_data.
3803 : : * @param[out] error
3804 : : * Pointer to error structure.
3805 : : *
3806 : : * @return
3807 : : * 0 on success, negative value otherwise and rte_errno is set.
3808 : : */
3809 : : static int
3810 : 0 : flow_hw_async_flow_update(struct rte_eth_dev *dev,
3811 : : uint32_t queue,
3812 : : const struct rte_flow_op_attr *attr,
3813 : : struct rte_flow *flow,
3814 : : const struct rte_flow_action actions[],
3815 : : uint8_t action_template_index,
3816 : : void *user_data,
3817 : : struct rte_flow_error *error)
3818 : : {
3819 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3820 : 0 : struct mlx5dr_rule_attr rule_attr = {
3821 : : .queue_id = queue,
3822 : : .user_data = user_data,
3823 : 0 : .burst = attr->postpone,
3824 : : };
3825 : : struct mlx5dr_rule_action *rule_acts;
3826 : : struct mlx5_flow_hw_action_params ap;
3827 : : struct rte_flow_hw *of = (struct rte_flow_hw *)flow;
3828 : : struct rte_flow_hw *nf;
3829 : : struct rte_flow_hw_aux *aux;
3830 : 0 : struct rte_flow_template_table *table = of->table;
3831 : 0 : uint32_t res_idx = 0;
3832 : : int ret;
3833 : :
3834 : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, of);
3835 [ # # ]: 0 : nf = &aux->upd_flow;
3836 : : memset(nf, 0, sizeof(struct rte_flow_hw));
3837 : 0 : rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
3838 : : /*
3839 : : * Set the table here in order to know the destination table
3840 : : * when freeing the flow afterwards.
3841 : : */
3842 : 0 : nf->table = table;
3843 : 0 : nf->mt_idx = of->mt_idx;
3844 : 0 : nf->idx = of->idx;
3845 [ # # ]: 0 : if (table->resource) {
3846 : 0 : mlx5_ipool_malloc(table->resource, &res_idx);
3847 [ # # ]: 0 : if (!res_idx)
3848 : 0 : goto error;
3849 : 0 : nf->res_idx = res_idx;
3850 : : } else {
3851 : 0 : nf->res_idx = of->res_idx;
3852 : : }
3853 : 0 : nf->flags = 0;
3854 : : /* Indicate the construction function to set the proper fields. */
3855 : 0 : nf->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
3856 : : /*
3857 : : * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
3858 : : * for rule insertion hints.
3859 : : * If there is only one STE, the update will be atomic by nature.
3860 : : */
3861 : 0 : nf->rule_idx = nf->res_idx - 1;
3862 : 0 : rule_attr.rule_idx = nf->rule_idx;
3863 : : /*
3864 : : * Construct the flow actions based on the input actions.
3865 : : * The implicitly appended action is always fixed, like metadata
3866 : : * copy action from FDB to NIC Rx.
3867 : : * No need to copy and construct a new "actions" list based on the
3868 : : * user's input, in order to save the cost.
3869 : : */
3870 : 0 : if (flow_hw_actions_construct(dev, nf, &ap,
3871 : 0 : &table->ats[action_template_index],
3872 [ # # ]: 0 : nf->mt_idx, actions,
3873 : : rule_acts, queue, error)) {
3874 : 0 : rte_errno = EINVAL;
3875 : 0 : goto error;
3876 : : }
3877 : : /*
3878 : : * Set the flow operation type here in order to know if the flow memory
3879 : : * should be freed or not when getting the result from dequeue.
3880 : : */
3881 : 0 : of->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
3882 : 0 : of->user_data = user_data;
3883 : 0 : of->flags |= MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW;
3884 : 0 : rule_attr.user_data = of;
3885 : 0 : ret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,
3886 : : action_template_index, rule_acts, &rule_attr);
3887 [ # # ]: 0 : if (likely(!ret)) {
3888 : 0 : flow_hw_q_inc_flow_ops(priv, queue);
3889 : 0 : return 0;
3890 : : }
3891 : 0 : error:
3892 [ # # # # ]: 0 : if (table->resource && res_idx)
3893 : 0 : mlx5_ipool_free(table->resource, res_idx);
3894 : 0 : return rte_flow_error_set(error, rte_errno,
3895 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3896 : : "fail to update rte flow");
3897 : : }
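/*
 * Editorial sketch of the matching public API call (setup assumed):
 * it replaces the actions of an existing flow with a new actions list
 * from one of the table's actions templates, without re-matching.
 * The completion must be pulled like any other async operation.
 */
static inline int
async_update_usage_sketch(uint16_t port_id, uint32_t queue_id,
			  const struct rte_flow_op_attr *op_attr,
			  struct rte_flow *flow,
			  const struct rte_flow_action new_actions[],
			  struct rte_flow_error *err)
{
	return rte_flow_async_actions_update(port_id, queue_id, op_attr,
					     flow, new_actions,
					     0 /* actions template index */,
					     NULL /* user_data */, err);
}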
3898 : :
3899 : : /**
3900 : : * Enqueue HW steering flow destruction.
3901 : : *
3902 : : * The flow will be applied to the HW only if the postpone bit is not set or
3903 : : * the extra push function is called.
3904 : : * The flow destruction status should be checked from dequeue result.
3905 : : *
3906 : : * @param[in] dev
3907 : : * Pointer to the rte_eth_dev structure.
3908 : : * @param[in] queue
3909 : : * The queue to destroy the flow.
3910 : : * @param[in] attr
3911 : : * Pointer to the flow operation attributes.
3912 : : * @param[in] flow
3913 : : * Pointer to the flow to be destroyed.
3914 : : * @param[in] user_data
3915 : : * Pointer to the user_data.
3916 : : * @param[out] error
3917 : : * Pointer to error structure.
3918 : : *
3919 : : * @return
3920 : : * 0 on success, negative value otherwise and rte_errno is set.
3921 : : */
3922 : : static int
3923 : 0 : flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
3924 : : uint32_t queue,
3925 : : const struct rte_flow_op_attr *attr,
3926 : : struct rte_flow *flow,
3927 : : void *user_data,
3928 : : struct rte_flow_error *error)
3929 : : {
3930 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3931 : 0 : struct mlx5dr_rule_attr rule_attr = {
3932 : : .queue_id = queue,
3933 : : .user_data = user_data,
3934 : 0 : .burst = attr->postpone,
3935 : : };
3936 : : struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
3937 : 0 : bool resizable = rte_flow_template_table_resizable(dev->data->port_id,
3938 : 0 : &fh->table->cfg.attr);
3939 : : int ret;
3940 : :
3941 [ # # ]: 0 : fh->operation_type = !resizable ?
3942 : : MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY :
3943 : : MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY;
3944 : 0 : fh->user_data = user_data;
3945 : 0 : rule_attr.user_data = fh;
3946 : 0 : rule_attr.rule_idx = fh->rule_idx;
3947 : 0 : ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
3948 [ # # ]: 0 : if (ret) {
3949 : 0 : return rte_flow_error_set(error, rte_errno,
3950 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3951 : : "fail to destroy rte flow");
3952 : : }
3953 : : flow_hw_q_inc_flow_ops(priv, queue);
3954 : 0 : return 0;
3955 : : }
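/*
 * Editorial sketch of the matching public API call: destruction is
 * enqueued the same way as creation, and the result must be pulled so
 * that the PMD can release the flow memory.
 *
 *   rte_flow_async_destroy(port_id, queue_id, &op_attr, flow,
 *                          NULL, &err);
 *   rte_flow_push(port_id, queue_id, &err);
 *   ... then rte_flow_pull() as in the creation sketch above ...
 */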
3956 : :
3957 : : /**
3958 : : * Release the AGE and counter for given flow.
3959 : : *
3960 : : * @param[in] priv
3961 : : * Pointer to the port private data structure.
3962 : : * @param[in] queue
3963 : : * The queue to release the counter.
3964 : : * @param[in, out] flow
3965 : : * Pointer to the flow containing the counter.
3966 : : * @param[out] error
3967 : : * Pointer to error structure.
3968 : : */
3969 : : static void
3970 : 0 : flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
3971 : : struct rte_flow_hw *flow,
3972 : : struct rte_flow_error *error)
3973 : : {
3974 : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(priv->dev_data->port_id, flow);
3975 : : uint32_t *cnt_queue;
3976 : 0 : uint32_t age_idx = aux->orig.age_idx;
3977 : :
3978 : : MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
3979 [ # # # # ]: 0 : if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
3980 [ # # # # ]: 0 : if ((flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) &&
3981 : : !mlx5_hws_age_is_indirect(age_idx)) {
3982 : : /* Remove this AGE parameter from indirect counter. */
3983 : : mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
3984 : : /* Release the AGE parameter. */
3985 : 0 : mlx5_hws_age_action_destroy(priv, age_idx, error);
3986 : : }
3987 : 0 : return;
3988 : : }
3989 : : cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
3990 : : /* Put the counter first to reduce the race risk in BG thread. */
3991 [ # # ]: 0 : mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);
3992 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) {
3993 [ # # ]: 0 : if (mlx5_hws_age_is_indirect(age_idx)) {
3994 : 0 : uint32_t idx = age_idx & MLX5_HWS_AGE_IDX_MASK;
3995 : :
3996 : : mlx5_hws_age_nb_cnt_decrease(priv, idx);
3997 : : } else {
3998 : : /* Release the AGE parameter. */
3999 : 0 : mlx5_hws_age_action_destroy(priv, age_idx, error);
4000 : : }
4001 : : }
4002 : : }
4003 : :
4004 : : static __rte_always_inline void
4005 : : flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job *job,
4006 : : uint32_t queue)
4007 : : {
4008 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4009 : : struct mlx5_aso_ct_action *aso_ct;
4010 : : struct mlx5_aso_mtr *aso_mtr;
4011 : : uint32_t type, idx;
4012 : :
4013 [ # # ]: 0 : if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
4014 : : MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
4015 : 0 : mlx5_quota_async_completion(dev, queue, job);
4016 [ # # ]: 0 : } else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
4017 : : type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4018 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4019 : 0 : idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4020 : 0 : mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
4021 : : }
4022 [ # # ]: 0 : } else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
4023 : : type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4024 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4025 : 0 : idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4026 : 0 : aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
4027 : 0 : aso_mtr->state = ASO_METER_READY;
4028 [ # # ]: 0 : } else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4029 : 0 : idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4030 : 0 : aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4031 : 0 : aso_ct->state = ASO_CONNTRACK_READY;
4032 : : }
4033 [ # # ]: 0 : } else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
4034 : : type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4035 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4036 : 0 : idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4037 : 0 : aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4038 : 0 : mlx5_aso_ct_obj_analyze(job->query.user,
4039 : 0 : job->query.hw);
4040 : 0 : aso_ct->state = ASO_CONNTRACK_READY;
4041 : : }
4042 : : }
4043 : : }
4044 : :
4045 : : static __rte_always_inline int
4046 : : mlx5_hw_pull_flow_transfer_comp(struct rte_eth_dev *dev,
4047 : : uint32_t queue, struct rte_flow_op_result res[],
4048 : : uint16_t n_res)
4049 : : {
4050 : : uint32_t size, i;
4051 : 0 : struct rte_flow_hw *flow = NULL;
4052 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4053 : 0 : struct rte_ring *ring = priv->hw_q[queue].flow_transfer_completed;
4054 : :
4055 : 0 : size = RTE_MIN(rte_ring_count(ring), n_res);
4056 [ # # ]: 0 : for (i = 0; i < size; i++) {
4057 [ # # # # : 0 : res[i].status = RTE_FLOW_OP_SUCCESS;
# ]
4058 : : rte_ring_dequeue(ring, (void **)&flow);
4059 : 0 : res[i].user_data = flow->user_data;
4060 : : flow_hw_q_dec_flow_ops(priv, queue);
4061 : : }
4062 : 0 : return (int)size;
4063 : : }
4064 : :
4065 : : static inline int
4066 : 0 : __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
4067 : : uint32_t queue,
4068 : : struct rte_flow_op_result res[],
4069 : : uint16_t n_res)
4070 : :
4071 : : {
4072 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4073 : 0 : struct rte_ring *r = priv->hw_q[queue].indir_cq;
4074 : 0 : void *user_data = NULL;
4075 : : int ret_comp, i;
4076 : :
4077 : 0 : ret_comp = (int)rte_ring_count(r);
4078 : 0 : if (ret_comp > n_res)
4079 : : ret_comp = n_res;
4080 [ # # ]: 0 : for (i = 0; i < ret_comp; i++) {
4081 : : rte_ring_dequeue(r, &user_data);
4082 : 0 : res[i].user_data = user_data;
4083 : 0 : res[i].status = RTE_FLOW_OP_SUCCESS;
4084 : : }
4085 [ # # ]: 0 : if (!priv->shared_host) {
4086 [ # # # # ]: 0 : if (ret_comp < n_res && priv->hws_mpool)
4087 : 0 : ret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],
4088 : 0 : &res[ret_comp], n_res - ret_comp);
4089 [ # # # # ]: 0 : if (ret_comp < n_res && priv->hws_ctpool)
4090 : 0 : ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
4091 : 0 : &res[ret_comp], n_res - ret_comp);
4092 : : }
4093 [ # # # # ]: 0 : if (ret_comp < n_res && priv->quota_ctx.sq)
4094 : 0 : ret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],
4095 : 0 : &res[ret_comp],
4096 : 0 : n_res - ret_comp);
4097 [ # # ]: 0 : for (i = 0; i < ret_comp; i++) {
4098 : 0 : struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)res[i].user_data;
4099 : :
4100 : : /* Restore user data. */
4101 : 0 : res[i].user_data = job->user_data;
4102 [ # # ]: 0 : if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
4103 : : flow_hw_pull_legacy_indirect_comp(dev, job, queue);
4104 : : /*
4105 : : * Current PMD supports 2 indirect action list types - MIRROR and REFORMAT.
4106 : : * These indirect list types do not post WQE to create action.
4107 : : * Future indirect list types that do post WQE will add
4108 : : * completion handlers here.
4109 : : */
4110 : : flow_hw_job_put(priv, job, queue);
4111 : : }
4112 : 0 : return ret_comp;
4113 : : }
4114 : :
4115 : : static __rte_always_inline void
4116 : : hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
4117 : : struct rte_flow_hw *flow,
4118 : : uint32_t queue, struct rte_flow_error *error)
4119 : : {
4120 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4121 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
4122 : 0 : struct rte_flow_template_table *table = flow->table;
4123 : : /* Release the original resource index in case of update. */
4124 : 0 : uint32_t res_idx = flow->res_idx;
4125 : :
4126 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAGS_ALL) {
4127 : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4128 : :
4129 [ # # # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
4130 : 0 : flow_hw_jump_release(dev, flow->jump);
4131 [ # # # # ]: 0 : else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
4132 : 0 : mlx5_hrxq_obj_release(dev, flow->hrxq);
4133 [ # # # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
4134 : 0 : flow_hw_age_count_release(priv, queue, flow, error);
4135 [ # # # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
4136 : 0 : mlx5_ipool_free(pool->idx_pool, aux->orig.mtr_id);
4137 [ # # # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW) {
4138 [ # # # # ]: 0 : struct rte_flow_hw *upd_flow = &aux->upd_flow;
4139 : :
4140 : : rte_memcpy(flow, upd_flow, offsetof(struct rte_flow_hw, rule));
4141 : 0 : aux->orig = aux->upd;
4142 : 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4143 [ # # # # ]: 0 : if (table->resource)
4144 : 0 : mlx5_ipool_free(table->resource, res_idx);
4145 : : }
4146 : : }
4147 [ # # # # ]: 0 : if (flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY ||
4148 : : flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY) {
4149 [ # # # # ]: 0 : if (table->resource)
4150 : 0 : mlx5_ipool_free(table->resource, res_idx);
4151 : 0 : mlx5_ipool_free(table->flow, flow->idx);
4152 : : }
4153 : : }
4154 : :
4155 : : static __rte_always_inline void
4156 : : hw_cmpl_resizable_tbl(struct rte_eth_dev *dev,
4157 : : struct rte_flow_hw *flow,
4158 : : uint32_t queue, enum rte_flow_op_status status,
4159 : : struct rte_flow_error *error)
4160 : : {
4161 : 0 : struct rte_flow_template_table *table = flow->table;
4162 : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4163 : 0 : uint32_t selector = aux->matcher_selector;
4164 : 0 : uint32_t other_selector = (selector + 1) & 1;
4165 : :
4166 : : MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
4167 [ # # # # ]: 0 : switch (flow->operation_type) {
4168 : 0 : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4169 : 0 : rte_atomic_fetch_add_explicit
4170 : : (&table->matcher_info[selector].refcnt, 1,
4171 : : rte_memory_order_relaxed);
4172 : 0 : break;
4173 : 0 : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4174 [ # # ]: 0 : rte_atomic_fetch_sub_explicit
4175 : : (&table->matcher_info[selector].refcnt, 1,
4176 : : rte_memory_order_relaxed);
4177 : : hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4178 : : break;
4179 : 0 : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4180 [ # # ]: 0 : if (status == RTE_FLOW_OP_SUCCESS) {
4181 : 0 : rte_atomic_fetch_sub_explicit
4182 : : (&table->matcher_info[selector].refcnt, 1,
4183 : : rte_memory_order_relaxed);
4184 : 0 : rte_atomic_fetch_add_explicit
4185 : : (&table->matcher_info[other_selector].refcnt, 1,
4186 : : rte_memory_order_relaxed);
4187 : 0 : aux->matcher_selector = other_selector;
4188 : : }
4189 : : break;
4190 : : default:
4191 : : break;
4192 : : }
4193 : : }
4194 : :
4195 : : /**
4196 : : * Pull the enqueued flows.
4197 : : *
4198 : : * For flows enqueued from creation/destruction, the status should be
4199 : : * checked from the dequeue result.
4200 : : *
4201 : : * @param[in] dev
4202 : : * Pointer to the rte_eth_dev structure.
4203 : : * @param[in] queue
4204 : : * The queue to pull the result.
4205 : : * @param[in, out] res
4206 : : * Array to save the results.
4207 : : * @param[in] n_res
4208 : : * Maximum number of results that can be stored in the array.
4209 : : * @param[out] error
4210 : : * Pointer to error structure.
4211 : : *
4212 : : * @return
4213 : : * Result number on success, negative value otherwise and rte_errno is set.
4214 : : */
4215 : : static int
4216 : 0 : flow_hw_pull(struct rte_eth_dev *dev,
4217 : : uint32_t queue,
4218 : : struct rte_flow_op_result res[],
4219 : : uint16_t n_res,
4220 : : struct rte_flow_error *error)
4221 : : {
4222 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4223 : : int ret, i;
4224 : :
4225 : : /* 1. Pull the flow completion. */
4226 : 0 : ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
4227 [ # # ]: 0 : if (ret < 0)
4228 : 0 : return rte_flow_error_set(error, rte_errno,
4229 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4230 : : "fail to query flow queue");
4231 [ # # ]: 0 : for (i = 0; i < ret; i++) {
4232 : 0 : struct rte_flow_hw *flow = res[i].user_data;
4233 : :
4234 : : /* Restore user data. */
4235 : 0 : res[i].user_data = flow->user_data;
4236 [ # # # ]: 0 : switch (flow->operation_type) {
4237 : : case MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY:
4238 : : case MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE:
4239 : : hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4240 : : break;
4241 : 0 : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4242 : : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4243 : : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4244 : 0 : hw_cmpl_resizable_tbl(dev, flow, queue, res[i].status, error);
4245 : : break;
4246 : : default:
4247 : : break;
4248 : : }
4249 : : flow_hw_q_dec_flow_ops(priv, queue);
4250 : : }
4251 : : /* 2. Pull indirect action comp. */
4252 [ # # ]: 0 : if (ret < n_res)
4253 : 0 : ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
4254 : 0 : n_res - ret);
4255 [ # # ]: 0 : if (ret < n_res)
4256 : 0 : ret += mlx5_hw_pull_flow_transfer_comp(dev, queue, &res[ret],
4257 : 0 : n_res - ret);
4258 : :
4259 : : return ret;
4260 : : }
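/*
 * Editorial sketch of result handling on the application side:
 * user_data is whatever was passed at enqueue time, and
 * handle_failure() is a hypothetical application helper.
 */
static void
pull_results_sketch(uint16_t port_id, uint32_t queue_id,
		    void (*handle_failure)(void *user_data))
{
	struct rte_flow_op_result res[BURST_THR];
	struct rte_flow_error err;
	int i, n;

	n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res), &err);
	for (i = 0; i < n; i++)
		if (res[i].status == RTE_FLOW_OP_ERROR)
			handle_failure(res[i].user_data);
}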
4261 : :
4262 : : static uint32_t
4263 : 0 : mlx5_hw_push_queue(struct rte_ring *pending_q, struct rte_ring *cmpl_q)
4264 : : {
4265 : 0 : void *job = NULL;
4266 : : uint32_t i, size = rte_ring_count(pending_q);
4267 : :
4268 [ # # ]: 0 : for (i = 0; i < size; i++) {
4269 : : rte_ring_dequeue(pending_q, &job);
4270 [ # # # # : 0 : rte_ring_enqueue(cmpl_q, job);
# ]
4271 : : }
4272 : 0 : return size;
4273 : : }
4274 : :
4275 : : static inline uint32_t
4276 : 0 : __flow_hw_push_action(struct rte_eth_dev *dev,
4277 : : uint32_t queue)
4278 : : {
4279 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4280 : 0 : struct mlx5_hw_q *hw_q = &priv->hw_q[queue];
4281 : :
4282 : 0 : mlx5_hw_push_queue(hw_q->indir_iq, hw_q->indir_cq);
4283 : 0 : mlx5_hw_push_queue(hw_q->flow_transfer_pending,
4284 : : hw_q->flow_transfer_completed);
4285 [ # # ]: 0 : if (!priv->shared_host) {
4286 [ # # ]: 0 : if (priv->hws_ctpool)
4287 : 0 : mlx5_aso_push_wqe(priv->sh,
4288 : 0 : &priv->ct_mng->aso_sqs[queue]);
4289 [ # # ]: 0 : if (priv->hws_mpool)
4290 : 0 : mlx5_aso_push_wqe(priv->sh,
4291 : 0 : &priv->hws_mpool->sq[queue]);
4292 : : }
4293 : 0 : return flow_hw_q_pending(priv, queue);
4294 : : }
4295 : :
4296 : : static int
4297 : 0 : __flow_hw_push(struct rte_eth_dev *dev,
4298 : : uint32_t queue,
4299 : : struct rte_flow_error *error)
4300 : : {
4301 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4302 : : int ret, num;
4303 : :
4304 : 0 : num = __flow_hw_push_action(dev, queue);
4305 : 0 : ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
4306 : : MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
4307 [ # # ]: 0 : if (ret) {
4308 : 0 : rte_flow_error_set(error, rte_errno,
4309 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4310 : : "fail to push flows");
4311 : 0 : return ret;
4312 : : }
4313 : : return num;
4314 : : }
4315 : :
4316 : : /**
4317 : : * Push the enqueued flows to HW.
4318 : : *
4319 : : * Force apply all the enqueued flows to the HW.
4320 : : *
4321 : : * @param[in] dev
4322 : : * Pointer to the rte_eth_dev structure.
4323 : : * @param[in] queue
4324 : : * The queue to push the flow.
4325 : : * @param[out] error
4326 : : * Pointer to error structure.
4327 : : *
4328 : : * @return
4329 : : * 0 on success, negative value otherwise and rte_errno is set.
4330 : : */
4331 : : static int
4332 : 0 : flow_hw_push(struct rte_eth_dev *dev,
4333 : : uint32_t queue, struct rte_flow_error *error)
4334 : : {
4335 : 0 : int ret = __flow_hw_push(dev, queue, error);
4336 : :
4337 : 0 : return ret >= 0 ? 0 : ret;
4338 : : }
4339 : :
4340 : : /**
4341 : : * Drain the enqueued flows' completion.
4342 : : *
4343 : : * @param[in] dev
4344 : : * Pointer to the rte_eth_dev structure.
4345 : : * @param[in] queue
4346 : : * The queue to pull the completions from.
4347 : : * @param[out] error
4348 : : * Pointer to error structure.
4349 : : *
4350 : : * @return
4351 : : * 0 on success, negative value otherwise and rte_errno is set.
4352 : : */
4353 : : static int
4354 : 0 : __flow_hw_pull_comp(struct rte_eth_dev *dev,
4355 : : uint32_t queue, struct rte_flow_error *error)
4356 : : {
4357 : : struct rte_flow_op_result comp[BURST_THR];
4358 : : int ret, i, empty_loop = 0;
4359 : : uint32_t pending_rules;
4360 : :
4361 : 0 : ret = __flow_hw_push(dev, queue, error);
4362 [ # # ]: 0 : if (ret < 0)
4363 : : return ret;
4364 : 0 : pending_rules = ret;
4365 [ # # ]: 0 : while (pending_rules) {
4366 : 0 : ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
4367 [ # # ]: 0 : if (ret < 0)
4368 : : return -1;
4369 [ # # ]: 0 : if (!ret) {
4370 : 0 : rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
4371 [ # # ]: 0 : if (++empty_loop > 5) {
4372 : 0 : DRV_LOG(WARNING, "Nothing dequeued, %u rules still pending; quitting.", pending_rules);
4373 : 0 : break;
4374 : : }
4375 : 0 : continue;
4376 : : }
4377 [ # # ]: 0 : for (i = 0; i < ret; i++) {
4378 [ # # ]: 0 : if (comp[i].status == RTE_FLOW_OP_ERROR)
4379 : 0 : DRV_LOG(WARNING, "Flow flush got an error CQE.");
4380 : : }
4381 : : /*
4382 : : * Indirect **SYNC** METER_MARK and CT actions do not
4383 : : * remove their completion right after the WQE post;
4384 : : * that implementation avoids an HW timeout. The
4385 : : * completion is removed before the following WQE post
4386 : : * instead. However, HWS queue updates do not reflect
4387 : : * that behaviour, so during port destruction the sync
4388 : : * queue may still hold pending completions.
4389 : : */
4390 : 0 : pending_rules -= RTE_MIN(pending_rules, (uint32_t)ret);
4391 : : empty_loop = 0;
4392 : : }
4393 : : return 0;
4394 : : }
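 : : /*
 : :  * The same drain loop expressed with the public API, assuming @c n_rules
 : :  * operations were pushed above; handle_failed_op() is a hypothetical
 : :  * application callback.
 : :  *
 : :  * @code{.c}
 : :  * struct rte_flow_op_result res[BURST_THR];
 : :  * uint32_t pending = n_rules;
 : :  *
 : :  * while (pending) {
 : :  *         int n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res),
 : :  *                               &error);
 : :  *         if (n < 0)
 : :  *                 break; // rte_errno is set
 : :  *         for (int i = 0; i < n; i++)
 : :  *                 if (res[i].status == RTE_FLOW_OP_ERROR)
 : :  *                         handle_failed_op(res[i].user_data);
 : :  *         pending -= n;
 : :  * }
 : :  * @endcode
 : :  */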
4395 : :
4396 : : /**
4397 : : * Flush created flows.
4398 : : *
4399 : : * @param[in] dev
4400 : : * Pointer to the rte_eth_dev structure.
4401 : : * @param[out] error
4402 : : * Pointer to error structure.
4403 : : *
4404 : : * @return
4405 : : * 0 on success, negative value otherwise and rte_errno is set.
4406 : : */
4407 : : int
4408 : 0 : flow_hw_q_flow_flush(struct rte_eth_dev *dev,
4409 : : struct rte_flow_error *error)
4410 : : {
4411 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4412 : 0 : struct mlx5_hw_q *hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
4413 : : struct rte_flow_template_table *tbl;
4414 : : struct rte_flow_hw *flow;
4415 : 0 : struct rte_flow_op_attr attr = {
4416 : : .postpone = 0,
4417 : : };
4418 : : uint32_t pending_rules = 0;
4419 : : uint32_t queue;
4420 : : uint32_t fidx;
4421 : :
4422 : : /*
4423 : : * Make sure to push and dequeue all enqueued flow
4424 : : * creation/destruction jobs in case the user forgot to
4425 : : * dequeue them; otherwise the enqueued flows would leak.
4426 : : * Forgotten dequeues would also make the flow flush
4427 : : * receive more CQEs than expected and drive
4428 : : * pending_rules negative.
4429 : : */
4430 [ # # ]: 0 : for (queue = 0; queue < priv->nb_queue; queue++) {
4431 [ # # ]: 0 : if (__flow_hw_pull_comp(dev, queue, error))
4432 : : return -1;
4433 : : }
4434 : : /* Flush flows table by table through MLX5_DEFAULT_FLUSH_QUEUE. */
4435 [ # # ]: 0 : LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
4436 [ # # ]: 0 : if (!tbl->cfg.external)
4437 : 0 : continue;
4438 [ # # ]: 0 : MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
4439 [ # # ]: 0 : if (flow_hw_async_flow_destroy(dev,
4440 : : MLX5_DEFAULT_FLUSH_QUEUE,
4441 : : &attr,
4442 : : (struct rte_flow *)flow,
4443 : : NULL,
4444 : : error))
4445 : : return -1;
4446 : 0 : pending_rules++;
4447 : : /* Drain completions once a full queue's worth is enqueued. */
4448 [ # # ]: 0 : if (pending_rules >= hw_q->size) {
4449 [ # # ]: 0 : if (__flow_hw_pull_comp(dev,
4450 : : MLX5_DEFAULT_FLUSH_QUEUE,
4451 : : error))
4452 : : return -1;
4453 : : pending_rules = 0;
4454 : : }
4455 : : }
4456 : : }
4457 : : /* Drain the remaining completions. */
4458 [ # # # # ]: 0 : if (pending_rules &&
4459 : 0 : __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, error))
4460 : 0 : return -1;
4461 : : return 0;
4462 : : }
4463 : :
4464 : : static int
4465 : 0 : mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
4466 : : struct rte_flow_template_table *tbl,
4467 : : struct mlx5_multi_pattern_segment *segment,
4468 : : uint32_t bulk_size,
4469 : : struct rte_flow_error *error)
4470 : : {
4471 : : int ret = 0;
4472 : : uint32_t i;
4473 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
4474 : : struct mlx5_tbl_multi_pattern_ctx *mpctx = &tbl->mpctx;
4475 : : const struct rte_flow_template_table_attr *table_attr = &tbl->cfg.attr;
4476 : : const struct rte_flow_attr *attr = &table_attr->flow_attr;
4477 : : enum mlx5dr_table_type type = get_mlx5dr_table_type(attr);
4478 : 0 : uint32_t flags = mlx5_hw_act_flag[!!attr->group][type];
4479 : : struct mlx5dr_action *dr_action = NULL;
4480 : :
4481 [ # # ]: 0 : for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
4482 [ # # ]: 0 : typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + i;
4483 : : enum mlx5dr_action_type reformat_type =
4484 : : mlx5_multi_pattern_reformat_index_to_type(i);
4485 : :
4486 [ # # ]: 0 : if (!reformat->elements_num)
4487 : 0 : continue;
4488 : : dr_action = reformat_type == MLX5DR_ACTION_TYP_INSERT_HEADER ?
4489 : : mlx5dr_action_create_insert_header
4490 : : (priv->dr_ctx, reformat->elements_num,
4491 : : reformat->insert_hdr, bulk_size, flags) :
4492 : 0 : mlx5dr_action_create_reformat
4493 : : (priv->dr_ctx, reformat_type, reformat->elements_num,
4494 : 0 : reformat->reformat_hdr, bulk_size, flags);
4495 [ # # ]: 0 : if (!dr_action) {
4496 : 0 : ret = rte_flow_error_set(error, rte_errno,
4497 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4498 : : NULL,
4499 : : "failed to create multi-pattern encap action");
4500 : 0 : goto error;
4501 : : }
4502 : 0 : segment->reformat_action[i] = dr_action;
4503 : : }
4504 [ # # ]: 0 : if (mpctx->mh.elements_num) {
4505 : : typeof(mpctx->mh) *mh = &mpctx->mh;
4506 : 0 : dr_action = mlx5dr_action_create_modify_header
4507 : 0 : (priv->dr_ctx, mpctx->mh.elements_num, mh->pattern,
4508 : : bulk_size, flags);
4509 [ # # ]: 0 : if (!dr_action) {
4510 : 0 : ret = rte_flow_error_set(error, rte_errno,
4511 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4512 : : NULL, "failed to create multi-pattern header modify action");
4513 : 0 : goto error;
4514 : : }
4515 : 0 : segment->mhdr_action = dr_action;
4516 : : }
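 : : 	/*
 : : 	 * Any successfully created bulk action finalizes this segment:
 : : 	 * record its capacity and pre-compute the next segment's head
 : : 	 * index so that a later table resize can append contiguously.
 : : 	 */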
4517 [ # # ]: 0 : if (dr_action) {
4518 : 0 : segment->capacity = RTE_BIT32(bulk_size);
4519 [ # # ]: 0 : if (segment != &mpctx->segments[MLX5_MAX_TABLE_RESIZE_NUM - 1])
4520 : 0 : segment[1].head_index = segment->head_index + segment->capacity;
4521 : : }
4522 : : return 0;
4523 : 0 : error:
4524 : 0 : mlx5_destroy_multi_pattern_segment(segment);
4525 : 0 : return ret;
4526 : : }
4527 : :
4528 : : static int
4529 : 0 : mlx5_hw_build_template_table(struct rte_eth_dev *dev,
4530 : : uint8_t nb_action_templates,
4531 : : struct rte_flow_actions_template *action_templates[],
4532 : : struct mlx5dr_action_template *at[],
4533 : : struct rte_flow_template_table *tbl,
4534 : : struct rte_flow_error *error)
4535 : : {
4536 : : int ret;
4537 : : uint8_t i;
4538 : :
4539 [ # # ]: 0 : for (i = 0; i < nb_action_templates; i++) {
4540 : 0 : uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
4541 : : __ATOMIC_RELAXED);
4542 : :
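 : : 		/*
 : : 		 * The template's creation reference must still be held;
 : : 		 * a post-increment result of 1 means the template is no
 : : 		 * longer valid and cannot be attached to the table.
 : : 		 */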
4543 [ # # ]: 0 : if (refcnt <= 1) {
4544 : 0 : rte_flow_error_set(error, EINVAL,
4545 : : RTE_FLOW_ERROR_TYPE_ACTION,
4546 : : &action_templates[i], "invalid AT refcount");
4547 : 0 : goto at_error;
4548 : : }
4549 : 0 : at[i] = action_templates[i]->tmpl;
4550 : 0 : tbl->ats[i].action_template = action_templates[i];
4551 : 0 : LIST_INIT(&tbl->ats[i].acts.act_list);
4552 : : /* Do NOT translate table actions if `dev` was not started. */
4553 [ # # ]: 0 : if (!dev->data->dev_started)
4554 : 0 : continue;
4555 : 0 : ret = __flow_hw_actions_translate(dev, &tbl->cfg,
4556 : : &tbl->ats[i].acts,
4557 : : action_templates[i],
4558 : : &tbl->mpctx, error);
4559 [ # # ]: 0 : if (ret) {
4560 : 0 : i++;
4561 : 0 : goto at_error;
4562 : : }
4563 : 0 : flow_hw_populate_rule_acts_caches(dev, tbl, i);
4564 : : }
4565 [ # # ]: 0 : tbl->nb_action_templates = nb_action_templates;
4566 [ # # ]: 0 : if (mlx5_is_multi_pattern_active(&tbl->mpctx)) {
4567 [ # # ]: 0 : ret = mlx5_tbl_multi_pattern_process(dev, tbl,
4568 : : &tbl->mpctx.segments[0],
4569 : : rte_log2_u32(tbl->cfg.attr.nb_flows),
4570 : : error);
4571 [ # # ]: 0 : if (ret)
4572 : 0 : goto at_error;
4573 : : }
4574 : : return 0;
4575 : :
4576 : : at_error:
4577 [ # # ]: 0 : while (i--) {
4578 : 0 : __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4579 : 0 : __atomic_sub_fetch(&action_templates[i]->refcnt,
4580 : : 1, __ATOMIC_RELAXED);
4581 : : }
4582 : 0 : return rte_errno;
4583 : : }
4584 : :
4585 : : /**
4586 : : * Create flow table.
4587 : : *
4588 : : * The input item and action templates will be bound to the table.
4589 : : * Flow memory will also be allocated. The matcher will be created
4590 : : * based on the item templates. Actions will be translated to
4591 : : * dedicated DR actions when possible.
4592 : : *
4593 : : * @param[in] dev
4594 : : * Pointer to the rte_eth_dev structure.
4595 : : * @param[in] table_cfg
4596 : : * Pointer to the table configuration.
4597 : : * @param[in] item_templates
4598 : : * Item template array to be bound to the table.
4599 : : * @param[in] nb_item_templates
4600 : : * Number of item templates.
4601 : : * @param[in] action_templates
4602 : : * Action template array to be bound to the table.
4603 : : * @param[in] nb_action_templates
4604 : : * Number of action templates.
4605 : : * @param[out] error
4606 : : * Pointer to error structure.
4607 : : *
4608 : : * @return
4609 : : * Table on success, NULL otherwise and rte_errno is set.
4610 : : */
4611 : : static struct rte_flow_template_table *
4612 : 0 : flow_hw_table_create(struct rte_eth_dev *dev,
4613 : : const struct mlx5_flow_template_table_cfg *table_cfg,
4614 : : struct rte_flow_pattern_template *item_templates[],
4615 : : uint8_t nb_item_templates,
4616 : : struct rte_flow_actions_template *action_templates[],
4617 : : uint8_t nb_action_templates,
4618 : : struct rte_flow_error *error)
4619 : : {
4620 : 0 : struct rte_flow_error sub_error = {
4621 : : .type = RTE_FLOW_ERROR_TYPE_NONE,
4622 : : .cause = NULL,
4623 : : .message = NULL,
4624 : : };
4625 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4626 : 0 : struct mlx5dr_matcher_attr matcher_attr = {0};
4627 : : struct rte_flow_template_table *tbl = NULL;
4628 : : struct mlx5_flow_group *grp;
4629 : : struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
4630 : : struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
4631 : 0 : const struct rte_flow_template_table_attr *attr = &table_cfg->attr;
4632 : 0 : struct rte_flow_attr flow_attr = attr->flow_attr;
4633 : 0 : struct mlx5_flow_cb_ctx ctx = {
4634 : : .dev = dev,
4635 : : .error = &sub_error,
4636 : : .data = &flow_attr,
4637 : : };
4638 : 0 : struct mlx5_indexed_pool_config cfg = {
4639 : : .trunk_size = 1 << 12,
4640 : : .per_core_cache = 1 << 13,
4641 : : .need_lock = 1,
4642 : 0 : .release_mem_en = !!priv->sh->config.reclaim_mode,
4643 : : .malloc = mlx5_malloc,
4644 : : .free = mlx5_free,
4645 : : .type = "mlx5_hw_table_flow",
4646 : : };
4647 : : struct mlx5_list_entry *ge;
4648 : : uint32_t i = 0, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
4649 [ # # ]: 0 : uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
4650 : 0 : bool port_started = !!dev->data->dev_started;
4651 : : bool rpool_needed;
4652 : : size_t tbl_mem_size;
4653 : : int err;
4654 : :
4655 : : /* The HWS layer accepts only one item template for the root table. */
4656 [ # # ]: 0 : if (!attr->flow_attr.group)
4657 : : max_tpl = 1;
4658 : 0 : cfg.max_idx = nb_flows;
4659 [ # # ]: 0 : cfg.size = !rte_flow_template_table_resizable(dev->data->port_id, attr) ?
4660 : : mlx5_flow_hw_entry_size() :
4661 : : mlx5_flow_hw_auxed_entry_size();
4662 : : /* For a table with very few flows, disable the per-core cache. */
4663 [ # # ]: 0 : if (nb_flows < cfg.trunk_size) {
4664 : 0 : cfg.per_core_cache = 0;
4665 : 0 : cfg.trunk_size = nb_flows;
4666 [ # # ]: 0 : } else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
4667 : 0 : cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
4668 : : }
4669 : : /* Check whether too many templates are requested. */
4670 [ # # # # ]: 0 : if (nb_item_templates > max_tpl ||
4671 : : nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
4672 : 0 : rte_errno = EINVAL;
4673 : 0 : goto error;
4674 : : }
4675 : : /*
4676 : : * Amount of memory required for rte_flow_template_table struct:
4677 : : * - Size of the struct itself.
4678 : : * - VLA of DR rule action containers at the end =
4679 : : * number of actions templates * number of queues * size of DR rule actions container.
4680 : : */
4681 : : tbl_mem_size = sizeof(*tbl);
4682 : 0 : tbl_mem_size += nb_action_templates * priv->nb_queue * sizeof(tbl->rule_acts[0]);
4683 : : /* Allocate the table memory. */
4684 : 0 : tbl = mlx5_malloc(MLX5_MEM_ZERO, tbl_mem_size, RTE_CACHE_LINE_SIZE, rte_socket_id());
4685 [ # # ]: 0 : if (!tbl)
4686 : 0 : goto error;
4687 : 0 : tbl->cfg = *table_cfg;
4688 : : /* Allocate flow indexed pool. */
4689 : 0 : tbl->flow = mlx5_ipool_create(&cfg);
4690 [ # # ]: 0 : if (!tbl->flow)
4691 : 0 : goto error;
4692 : : /* Allocate table of auxiliary flow rule structs. */
4693 : 0 : tbl->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux) * nb_flows,
4694 : 0 : RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
4695 [ # # ]: 0 : if (!tbl->flow_aux)
4696 : 0 : goto error;
4697 : : /* Register the flow group. */
4698 : 0 : ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
4699 [ # # ]: 0 : if (!ge)
4700 : 0 : goto error;
4701 : : grp = container_of(ge, struct mlx5_flow_group, entry);
4702 : 0 : tbl->grp = grp;
4703 : : /* Prepare matcher information. */
4704 : 0 : matcher_attr.resizable = !!rte_flow_template_table_resizable
4705 : 0 : (dev->data->port_id, &table_cfg->attr);
4706 : 0 : matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
4707 : 0 : matcher_attr.priority = attr->flow_attr.priority;
4708 : 0 : matcher_attr.optimize_using_rule_idx = true;
4709 : 0 : matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
4710 [ # # ]: 0 : matcher_attr.insert_mode = flow_hw_matcher_insert_mode_get(attr->insertion_type);
4711 [ # # ]: 0 : if (attr->hash_func == RTE_FLOW_TABLE_HASH_FUNC_CRC16) {
4712 : 0 : DRV_LOG(ERR, "16-bit checksum hash type is not supported");
4713 : 0 : rte_errno = ENOTSUP;
4714 : 0 : goto it_error;
4715 : : }
4716 [ # # ]: 0 : matcher_attr.distribute_mode = flow_hw_matcher_distribute_mode_get(attr->hash_func);
4717 : 0 : matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
4718 : : /* Parse hints information. */
4719 [ # # ]: 0 : if (attr->specialize) {
4720 : : uint32_t val = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
4721 : : RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
4722 : :
4723 [ # # ]: 0 : if ((attr->specialize & val) == val) {
4724 : 0 : DRV_LOG(ERR, "Invalid hint value %x",
4725 : : attr->specialize);
4726 : 0 : rte_errno = EINVAL;
4727 : 0 : goto it_error;
4728 : : }
4729 [ # # ]: 0 : if (attr->specialize &
4730 : : RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
4731 : 0 : matcher_attr.optimize_flow_src =
4732 : : MLX5DR_MATCHER_FLOW_SRC_WIRE;
4733 [ # # ]: 0 : else if (attr->specialize &
4734 : : RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
4735 : 0 : matcher_attr.optimize_flow_src =
4736 : : MLX5DR_MATCHER_FLOW_SRC_VPORT;
4737 : : }
4738 : : /* Build the item template. */
4739 [ # # ]: 0 : for (i = 0; i < nb_item_templates; i++) {
4740 : : uint32_t ret;
4741 : :
4742 [ # # # # ]: 0 : if ((flow_attr.ingress && !item_templates[i]->attr.ingress) ||
4743 [ # # # # ]: 0 : (flow_attr.egress && !item_templates[i]->attr.egress) ||
4744 [ # # # # ]: 0 : (flow_attr.transfer && !item_templates[i]->attr.transfer)) {
4745 : 0 : DRV_LOG(ERR, "pattern template and template table attribute mismatch");
4746 : 0 : rte_errno = EINVAL;
4747 : 0 : goto it_error;
4748 : : }
4749 [ # # ]: 0 : if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
4750 : 0 : matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
4751 : 0 : ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
4752 : : __ATOMIC_RELAXED) + 1;
4753 [ # # ]: 0 : if (ret <= 1) {
4754 : 0 : rte_errno = EINVAL;
4755 : 0 : goto it_error;
4756 : : }
4757 : 0 : mt[i] = item_templates[i]->mt;
4758 : 0 : tbl->its[i] = item_templates[i];
4759 : : }
4760 : 0 : tbl->nb_item_templates = nb_item_templates;
4761 : : /* Build the action template. */
4762 : 0 : err = mlx5_hw_build_template_table(dev, nb_action_templates,
4763 : : action_templates, at, tbl, &sub_error);
4764 [ # # ]: 0 : if (err) {
4765 : : i = nb_item_templates;
4766 : 0 : goto it_error;
4767 : : }
4768 : 0 : tbl->matcher_info[0].matcher = mlx5dr_matcher_create
4769 : 0 : (tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
4770 [ # # ]: 0 : if (!tbl->matcher_info[0].matcher)
4771 : 0 : goto at_error;
4772 : 0 : tbl->matcher_attr = matcher_attr;
4773 [ # # ]: 0 : tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
4774 : 0 : (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
4775 : : MLX5DR_TABLE_TYPE_NIC_RX);
4776 : : /*
4777 : : * Only when the matcher supports rule updates and needs more than one WQE
4778 : : * is an additional rule index required; otherwise the flow index can be reused.
4779 : : */
4780 [ # # # # ]: 0 : rpool_needed = mlx5dr_matcher_is_updatable(tbl->matcher_info[0].matcher) &&
4781 : 0 : mlx5dr_matcher_is_dependent(tbl->matcher_info[0].matcher);
4782 [ # # ]: 0 : if (rpool_needed) {
4783 : : /* Allocate rule indexed pool. */
4784 : 0 : cfg.size = 0;
4785 : 0 : cfg.type = "mlx5_hw_table_rule";
4786 : 0 : cfg.max_idx += priv->hw_q[0].size;
4787 : 0 : tbl->resource = mlx5_ipool_create(&cfg);
4788 [ # # ]: 0 : if (!tbl->resource)
4789 : 0 : goto res_error;
4790 : : }
4791 [ # # ]: 0 : if (port_started)
4792 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
4793 : : else
4794 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
4795 : : rte_rwlock_init(&tbl->matcher_replace_rwlk);
4796 : 0 : return tbl;
4797 : : res_error:
4798 [ # # ]: 0 : if (tbl->matcher_info[0].matcher)
4799 : 0 : (void)mlx5dr_matcher_destroy(tbl->matcher_info[0].matcher);
4800 : 0 : at_error:
4801 [ # # ]: 0 : for (i = 0; i < nb_action_templates; i++) {
4802 : 0 : __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
4803 : 0 : __atomic_fetch_sub(&action_templates[i]->refcnt,
4804 : : 1, __ATOMIC_RELAXED);
4805 : : }
4806 : : i = nb_item_templates;
4807 : : it_error:
4808 [ # # ]: 0 : while (i--)
4809 : 0 : __atomic_fetch_sub(&item_templates[i]->refcnt,
4810 : : 1, __ATOMIC_RELAXED);
4811 : 0 : error:
4812 : 0 : err = rte_errno;
4813 [ # # ]: 0 : if (tbl) {
4814 [ # # ]: 0 : if (tbl->grp)
4815 : 0 : mlx5_hlist_unregister(priv->sh->groups,
4816 : : &tbl->grp->entry);
4817 [ # # ]: 0 : if (tbl->flow_aux)
4818 : 0 : mlx5_free(tbl->flow_aux);
4819 [ # # ]: 0 : if (tbl->flow)
4820 : 0 : mlx5_ipool_destroy(tbl->flow);
4821 : 0 : mlx5_free(tbl);
4822 : : }
4823 [ # # ]: 0 : if (error != NULL) {
4824 [ # # ]: 0 : if (sub_error.type == RTE_FLOW_ERROR_TYPE_NONE)
4825 : 0 : rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4826 : : "Failed to create template table");
4827 : : else
4828 : : rte_memcpy(error, &sub_error, sizeof(sub_error));
4829 : : }
4830 : : return NULL;
4831 : : }
4832 : :
4833 : : /**
4834 : : * Update flow template tables created before the port was started.
4835 : : *
4836 : : * @param[in] dev
4837 : : * Pointer to the rte_eth_dev structure.
4838 : : * @param[out] error
4839 : : * Pointer to error structure.
4840 : : *
4841 : : * @return
4842 : : * 0 on success, negative value otherwise and rte_errno is set.
4843 : : */
4844 : : int
4845 : 0 : flow_hw_table_update(struct rte_eth_dev *dev,
4846 : : struct rte_flow_error *error)
4847 : : {
4848 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4849 : : struct rte_flow_template_table *tbl;
4850 : :
4851 [ # # ]: 0 : while ((tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo)) != NULL) {
4852 [ # # ]: 0 : if (flow_hw_actions_translate(dev, tbl, error))
4853 : : return -1;
4854 [ # # ]: 0 : LIST_REMOVE(tbl, next);
4855 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
4856 : : }
4857 : : return 0;
4858 : : }
4859 : :
4860 : : /**
4861 : : * Translates the group index specified by the user in @p group to the
4862 : : * internal group index.
4863 : : *
4864 : : * Translation is done by incrementing group index, so group n becomes n + 1.
4865 : : *
4866 : : * @param[in] dev
4867 : : * Pointer to Ethernet device.
4868 : : * @param[in] cfg
4869 : : * Pointer to the template table configuration.
4870 : : * @param[in] group
4871 : : * Currently used group index (table group or jump destination).
4872 : : * @param[out] table_group
4873 : : * Pointer to output group index.
4874 : : * @param[out] error
4875 : : * Pointer to error structure.
4876 : : *
4877 : : * @return
4878 : : * 0 on success. Otherwise, returns negative error code, rte_errno is set
4879 : : * and error structure is filled.
4880 : : */
4881 : : static int
4882 : 0 : flow_hw_translate_group(struct rte_eth_dev *dev,
4883 : : const struct mlx5_flow_template_table_cfg *cfg,
4884 : : uint32_t group,
4885 : : uint32_t *table_group,
4886 : : struct rte_flow_error *error)
4887 : : {
4888 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4889 : 0 : struct mlx5_sh_config *config = &priv->sh->config;
4890 : : const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
4891 : :
4892 [ # # ]: 0 : if (config->dv_esw_en &&
4893 [ # # ]: 0 : priv->fdb_def_rule &&
4894 [ # # # # ]: 0 : cfg->external &&
4895 : : flow_attr->transfer) {
4896 [ # # ]: 0 : if (group > MLX5_HW_MAX_TRANSFER_GROUP)
4897 : 0 : return rte_flow_error_set(error, EINVAL,
4898 : : RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4899 : : NULL,
4900 : : "group index not supported");
4901 : 0 : *table_group = group + 1;
4902 [ # # ]: 0 : } else if (config->dv_esw_en &&
4903 [ # # # # ]: 0 : (config->repr_matching || config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) &&
4904 [ # # # # ]: 0 : cfg->external &&
4905 : : flow_attr->egress) {
4906 : : /*
4907 : : * On E-Switch setups, default egress flow rules are inserted to allow
4908 : : * representor matching and/or preserving metadata across steering domains.
4909 : : * These flow rules are inserted in group 0 and this group is reserved by PMD
4910 : : * for these purposes.
4911 : : *
4912 : : * As a result, if representor matching or extended metadata mode is enabled,
4913 : : * group provided by the user must be incremented to avoid inserting flow rules
4914 : : * in group 0.
4915 : : */
4916 [ # # ]: 0 : if (group > MLX5_HW_MAX_EGRESS_GROUP)
4917 : 0 : return rte_flow_error_set(error, EINVAL,
4918 : : RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4919 : : NULL,
4920 : : "group index not supported");
4921 : 0 : *table_group = group + 1;
4922 : : } else {
4923 : 0 : *table_group = group;
4924 : : }
4925 : : return 0;
4926 : : }
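 : : /*
 : :  * For example, with dv_esw_en and repr_matching enabled, an application
 : :  * egress table requested in group 3 is installed in internal group 4,
 : :  * keeping group 0 reserved for the PMD's default egress rules.
 : :  */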
4927 : :
4928 : : /**
4929 : : * Create flow table.
4930 : : *
4931 : : * This function is a wrapper over @ref flow_hw_table_create(), which translates parameters
4932 : : * provided by user to proper internal values.
4933 : : *
4934 : : * @param[in] dev
4935 : : * Pointer to Ethernet device.
4936 : : * @param[in] attr
4937 : : * Pointer to the table attributes.
4938 : : * @param[in] item_templates
4939 : : * Item template array to be bound to the table.
4940 : : * @param[in] nb_item_templates
4941 : : * Number of item templates.
4942 : : * @param[in] action_templates
4943 : : * Action template array to be bound to the table.
4944 : : * @param[in] nb_action_templates
4945 : : * Number of action templates.
4946 : : * @param[out] error
4947 : : * Pointer to error structure.
4948 : : *
4949 : : * @return
4950 : : * Table on success. Otherwise NULL is returned, rte_errno is set
4951 : : * and the error structure is filled.
4952 : : */
4953 : : static struct rte_flow_template_table *
4954 : 0 : flow_hw_template_table_create(struct rte_eth_dev *dev,
4955 : : const struct rte_flow_template_table_attr *attr,
4956 : : struct rte_flow_pattern_template *item_templates[],
4957 : : uint8_t nb_item_templates,
4958 : : struct rte_flow_actions_template *action_templates[],
4959 : : uint8_t nb_action_templates,
4960 : : struct rte_flow_error *error)
4961 : : {
4962 : 0 : struct mlx5_flow_template_table_cfg cfg = {
4963 : : .attr = *attr,
4964 : : .external = true,
4965 : : };
4966 : 0 : uint32_t group = attr->flow_attr.group;
4967 : :
4968 [ # # ]: 0 : if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
4969 : : return NULL;
4970 [ # # # # ]: 0 : if (!cfg.attr.flow_attr.group &&
4971 : 0 : rte_flow_template_table_resizable(dev->data->port_id, attr)) {
4972 : 0 : rte_flow_error_set(error, EINVAL,
4973 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4974 : : "table cannot be resized: invalid group");
4975 : 0 : return NULL;
4976 : : }
4977 : 0 : return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
4978 : : action_templates, nb_action_templates, error);
4979 : : }
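 : : /*
 : :  * A minimal sketch of the public entry point implemented by this
 : :  * wrapper; pattern_tmpl and actions_tmpl are illustrative, previously
 : :  * created template handles.
 : :  *
 : :  * @code{.c}
 : :  * struct rte_flow_template_table_attr attr = {
 : :  *         .flow_attr = { .group = 1, .ingress = 1 },
 : :  *         .nb_flows = 1 << 16,
 : :  * };
 : :  * struct rte_flow_template_table *tbl;
 : :  *
 : :  * tbl = rte_flow_template_table_create(port_id, &attr,
 : :  *                                      &pattern_tmpl, 1,
 : :  *                                      &actions_tmpl, 1, &error);
 : :  * // Use the table with rte_flow_async_create(), then:
 : :  * rte_flow_template_table_destroy(port_id, tbl, &error);
 : :  * @endcode
 : :  */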
4980 : :
4981 : : static void
4982 : 0 : mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment)
4983 : : {
4984 : : int i;
4985 : :
4986 [ # # ]: 0 : if (segment->mhdr_action)
4987 : 0 : mlx5dr_action_destroy(segment->mhdr_action);
4988 [ # # ]: 0 : for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
4989 [ # # ]: 0 : if (segment->reformat_action[i])
4990 : 0 : mlx5dr_action_destroy(segment->reformat_action[i]);
4991 : : }
4992 : 0 : segment->capacity = 0;
4993 : 0 : }
4994 : :
4995 : : static void
4996 : : flow_hw_destroy_table_multi_pattern_ctx(struct rte_flow_template_table *table)
4997 : : {
4998 : : int sx;
4999 : :
5000 [ # # ]: 0 : for (sx = 0; sx < MLX5_MAX_TABLE_RESIZE_NUM; sx++)
5001 : 0 : mlx5_destroy_multi_pattern_segment(table->mpctx.segments + sx);
5002 : : }
 : :
5003 : : /**
5004 : : * Destroy flow table.
5005 : : *
5006 : : * @param[in] dev
5007 : : * Pointer to the rte_eth_dev structure.
5008 : : * @param[in] table
5009 : : * Pointer to the table to be destroyed.
5010 : : * @param[out] error
5011 : : * Pointer to error structure.
5012 : : *
5013 : : * @return
5014 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5015 : : */
5016 : : static int
5017 : 0 : flow_hw_table_destroy(struct rte_eth_dev *dev,
5018 : : struct rte_flow_template_table *table,
5019 : : struct rte_flow_error *error)
5020 : : {
5021 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5022 : : int i;
5023 : 0 : uint32_t fidx = 1;
5024 : 0 : uint32_t ridx = 1;
5025 : :
5026 : : /* Build ipool allocated object bitmap. */
5027 [ # # ]: 0 : if (table->resource)
5028 : 0 : mlx5_ipool_flush_cache(table->resource);
5029 : 0 : mlx5_ipool_flush_cache(table->flow);
5030 : : /* Check if ipool has allocated objects. */
5031 [ # # # # ]: 0 : if (table->refcnt ||
5032 : 0 : mlx5_ipool_get_next(table->flow, &fidx) ||
5033 [ # # # # ]: 0 : (table->resource && mlx5_ipool_get_next(table->resource, &ridx))) {
5034 : 0 : DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
5035 : 0 : return rte_flow_error_set(error, EBUSY,
5036 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5037 : : NULL,
5038 : : "table in use");
5039 : : }
5040 [ # # ]: 0 : LIST_REMOVE(table, next);
5041 [ # # ]: 0 : for (i = 0; i < table->nb_item_templates; i++)
5042 : 0 : __atomic_fetch_sub(&table->its[i]->refcnt,
5043 : : 1, __ATOMIC_RELAXED);
5044 [ # # ]: 0 : for (i = 0; i < table->nb_action_templates; i++) {
5045 : 0 : __flow_hw_action_template_destroy(dev, &table->ats[i].acts);
5046 : 0 : __atomic_fetch_sub(&table->ats[i].action_template->refcnt,
5047 : : 1, __ATOMIC_RELAXED);
5048 : : }
5049 : : flow_hw_destroy_table_multi_pattern_ctx(table);
5050 [ # # ]: 0 : if (table->matcher_info[0].matcher)
5051 : 0 : mlx5dr_matcher_destroy(table->matcher_info[0].matcher);
5052 [ # # ]: 0 : if (table->matcher_info[1].matcher)
5053 : 0 : mlx5dr_matcher_destroy(table->matcher_info[1].matcher);
5054 : 0 : mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
5055 [ # # ]: 0 : if (table->resource)
5056 : 0 : mlx5_ipool_destroy(table->resource);
5057 : 0 : mlx5_free(table->flow_aux);
5058 : 0 : mlx5_ipool_destroy(table->flow);
5059 : 0 : mlx5_free(table);
5060 : 0 : return 0;
5061 : : }
5062 : :
5063 : : /**
5064 : : * Parse group's miss actions.
5065 : : *
5066 : : * @param[in] dev
5067 : : * Pointer to the rte_eth_dev structure.
5068 : : * @param[in] cfg
5069 : : * Pointer to the table_cfg structure.
5070 : : * @param[in] actions
5071 : : * Array of actions to perform on group miss. Supported types:
5072 : : * RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5073 : : * @param[out] dst_group_id
5074 : : * Pointer to the destination group id output. Set to 0 if the action list
5075 : : * contains only END, otherwise set to the destination group id.
5076 : : * @param[out] error
5077 : : * Pointer to error structure.
5078 : : *
5079 : : * @return
5080 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5081 : : */
5083 : : static int
5084 : 0 : flow_hw_group_parse_miss_actions(struct rte_eth_dev *dev,
5085 : : struct mlx5_flow_template_table_cfg *cfg,
5086 : : const struct rte_flow_action actions[],
5087 : : uint32_t *dst_group_id,
5088 : : struct rte_flow_error *error)
5089 : : {
5090 : : const struct rte_flow_action_jump *jump_conf;
5091 : 0 : uint32_t temp = 0;
5092 : : uint32_t i;
5093 : :
5094 [ # # ]: 0 : for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
5095 [ # # # ]: 0 : switch (actions[i].type) {
5096 : 0 : case RTE_FLOW_ACTION_TYPE_VOID:
5097 : 0 : continue;
5098 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
5099 [ # # ]: 0 : if (temp)
5100 : 0 : return rte_flow_error_set(error, ENOTSUP,
5101 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, actions,
5102 : : "Miss actions can contain only a single JUMP");
5103 : :
5104 : 0 : jump_conf = (const struct rte_flow_action_jump *)actions[i].conf;
5105 [ # # ]: 0 : if (!jump_conf)
5106 : 0 : return rte_flow_error_set(error, EINVAL,
5107 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5108 : : jump_conf, "Jump conf must not be NULL");
5109 : :
5110 [ # # ]: 0 : if (flow_hw_translate_group(dev, cfg, jump_conf->group, &temp, error))
5111 : 0 : return -rte_errno;
5112 : :
5113 [ # # ]: 0 : if (!temp)
5114 : 0 : return rte_flow_error_set(error, EINVAL,
5115 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5116 : : "Failed to set group miss actions - Invalid target group");
5117 : : break;
5118 : 0 : default:
5119 : 0 : return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5120 : : &actions[i], "Unsupported default miss action type");
5121 : : }
5122 : : }
5123 : :
5124 : 0 : *dst_group_id = temp;
5125 : 0 : return 0;
5126 : : }
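 : : /*
 : :  * For instance, a list of { JUMP to group 2, END } yields a destination
 : :  * group id of 2 (after group translation), while a list containing only
 : :  * END yields 0, which later requests a reset to the default miss
 : :  * behavior.
 : :  */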
5127 : :
5128 : : /**
5129 : : * Set group's miss group.
5130 : : *
5131 : : * @param[in] dev
5132 : : * Pointer to the rte_eth_dev structure.
5133 : : * @param[in] cfg
5134 : : * Pointer to the table_cfg structure.
5135 : : * @param[in] src_grp
5136 : : * Pointer to source group structure.
5137 : : * if NULL, a new group will be created based on group id from cfg->attr.flow_attr.group.
5138 : : * @param[in] dst_grp
5139 : : * Pointer to destination group structure.
5140 : : * @param[out] error
5141 : : * Pointer to error structure.
5142 : : *
5143 : : * @return
5144 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5145 : : */
5147 : : static int
5148 : 0 : flow_hw_group_set_miss_group(struct rte_eth_dev *dev,
5149 : : struct mlx5_flow_template_table_cfg *cfg,
5150 : : struct mlx5_flow_group *src_grp,
5151 : : struct mlx5_flow_group *dst_grp,
5152 : : struct rte_flow_error *error)
5153 : : {
5154 : 0 : struct rte_flow_error sub_error = {
5155 : : .type = RTE_FLOW_ERROR_TYPE_NONE,
5156 : : .cause = NULL,
5157 : : .message = NULL,
5158 : : };
5159 : 0 : struct mlx5_flow_cb_ctx ctx = {
5160 : : .dev = dev,
5161 : : .error = &sub_error,
5162 : 0 : .data = &cfg->attr.flow_attr,
5163 : : };
5164 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5165 : : struct mlx5_list_entry *ge;
5166 : : bool ref = false;
5167 : : int ret;
5168 : :
5169 [ # # ]: 0 : if (!dst_grp)
5170 : : return -EINVAL;
5171 : :
5172 : : /* If the group doesn't exist, it needs to be created. */
5173 [ # # ]: 0 : if (!src_grp) {
5174 : 0 : ge = mlx5_hlist_register(priv->sh->groups, cfg->attr.flow_attr.group, &ctx);
5175 [ # # ]: 0 : if (!ge)
5176 : 0 : return -rte_errno;
5177 : :
5178 : : src_grp = container_of(ge, struct mlx5_flow_group, entry);
5179 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5180 : : ref = true;
5181 [ # # ]: 0 : } else if (!src_grp->miss_group) {
5182 : : /* If group exists, but has no miss actions - need to increase ref_cnt. */
5183 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5184 : 0 : src_grp->entry.ref_cnt++;
5185 : : ref = true;
5186 : : }
5187 : :
5188 : 0 : ret = mlx5dr_table_set_default_miss(src_grp->tbl, dst_grp->tbl);
5189 [ # # ]: 0 : if (ret)
5190 : 0 : goto mlx5dr_error;
5191 : :
5192 : : /* If the group existed and already had miss actions, its ref_cnt is
5193 : : * already correct; only the old miss group's reference counter must be reduced.
5194 : : */
5195 [ # # ]: 0 : if (src_grp->miss_group)
5196 : 0 : mlx5_hlist_unregister(priv->sh->groups, &src_grp->miss_group->entry);
5197 : :
5198 : 0 : src_grp->miss_group = dst_grp;
5199 : 0 : return 0;
5200 : :
5201 : : mlx5dr_error:
5202 : : /* On mlx5dr error, drop the src_grp reference taken above and remove it from the group list. */
5203 [ # # ]: 0 : if (ref) {
5204 : 0 : mlx5_hlist_unregister(priv->sh->groups, &src_grp->entry);
5205 [ # # ]: 0 : LIST_REMOVE(src_grp, next);
5206 : : }
5207 : :
5208 : 0 : return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5209 : : "Failed to set group miss actions");
5210 : : }
5211 : :
5212 : : /**
5213 : : * Unset group's miss group.
5214 : : *
5215 : : * @param[in] dev
5216 : : * Pointer to the rte_eth_dev structure.
5217 : : * @param[in] grp
5218 : : * Pointer to group structure.
5219 : : * @param[out] error
5220 : : * Pointer to error structure.
5221 : : *
5222 : : * @return
5223 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5224 : : */
5226 : : static int
5227 : 0 : flow_hw_group_unset_miss_group(struct rte_eth_dev *dev,
5228 : : struct mlx5_flow_group *grp,
5229 : : struct rte_flow_error *error)
5230 : : {
5231 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5232 : : int ret;
5233 : :
5234 : : /* If the group doesn't exist, there is nothing to change. */
5235 [ # # ]: 0 : if (!grp)
5236 : : return 0;
5237 : :
5238 : : /* If the group exists but its miss behavior is already the default,
5239 : : * there is nothing to change.
5240 : : */
5241 [ # # ]: 0 : if (!grp->miss_group)
5242 : : return 0;
5243 : :
5244 : 0 : ret = mlx5dr_table_set_default_miss(grp->tbl, NULL);
5245 [ # # ]: 0 : if (ret)
5246 : 0 : return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5247 : : "Failed to unset group miss actions");
5248 : :
5249 : 0 : mlx5_hlist_unregister(priv->sh->groups, &grp->miss_group->entry);
5250 : 0 : grp->miss_group = NULL;
5251 : :
5252 [ # # ]: 0 : LIST_REMOVE(grp, next);
5253 : 0 : mlx5_hlist_unregister(priv->sh->groups, &grp->entry);
5254 : :
5255 : 0 : return 0;
5256 : : }
5257 : :
5258 : : /**
5259 : : * Set group miss actions.
5260 : : *
5261 : : * @param[in] dev
5262 : : * Pointer to the rte_eth_dev structure.
5263 : : * @param[in] group_id
5264 : : * Group id.
5265 : : * @param[in] attr
5266 : : * Pointer to group attributes structure.
5267 : : * @param[in] actions
5268 : : * Array of actions to perform on group miss. Supported types:
5269 : : * RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5270 : : * @param[out] error
5271 : : * Pointer to error structure.
5272 : : *
5273 : : * @return
5274 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5275 : : */
5277 : : static int
5278 : 0 : flow_hw_group_set_miss_actions(struct rte_eth_dev *dev,
5279 : : uint32_t group_id,
5280 : : const struct rte_flow_group_attr *attr,
5281 : : const struct rte_flow_action actions[],
5282 : : struct rte_flow_error *error)
5283 : : {
5284 : 0 : struct rte_flow_error sub_error = {
5285 : : .type = RTE_FLOW_ERROR_TYPE_NONE,
5286 : : .cause = NULL,
5287 : : .message = NULL,
5288 : : };
5289 : 0 : struct mlx5_flow_template_table_cfg cfg = {
5290 : : .external = true,
5291 : : .attr = {
5292 : : .flow_attr = {
5293 : : .group = group_id,
5294 : 0 : .ingress = attr->ingress,
5295 : 0 : .egress = attr->egress,
5296 : 0 : .transfer = attr->transfer,
5297 : : },
5298 : : },
5299 : : };
5300 : 0 : struct mlx5_flow_cb_ctx ctx = {
5301 : : .dev = dev,
5302 : : .error = &sub_error,
5303 : : .data = &cfg.attr.flow_attr,
5304 : : };
5305 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5306 : : struct mlx5_flow_group *src_grp = NULL;
5307 : : struct mlx5_flow_group *dst_grp = NULL;
5308 : : struct mlx5_list_entry *ge;
5309 : 0 : uint32_t dst_group_id = 0;
5310 : : int ret;
5311 : :
5312 [ # # ]: 0 : if (flow_hw_translate_group(dev, &cfg, group_id, &group_id, error))
5313 : 0 : return -rte_errno;
5314 : :
5315 [ # # ]: 0 : if (!group_id)
5316 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5317 : : NULL, "Failed to set group miss actions - invalid group id");
5318 : :
5319 : 0 : ret = flow_hw_group_parse_miss_actions(dev, &cfg, actions, &dst_group_id, error);
5320 [ # # ]: 0 : if (ret)
5321 : 0 : return -rte_errno;
5322 : :
5323 [ # # ]: 0 : if (dst_group_id == group_id) {
5324 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5325 : : NULL, "Failed to set group miss actions - target group id must differ from group_id");
5326 : : }
5327 : :
5328 : 0 : cfg.attr.flow_attr.group = group_id;
5329 : 0 : ge = mlx5_hlist_lookup(priv->sh->groups, group_id, &ctx);
5330 [ # # ]: 0 : if (ge)
5331 : : src_grp = container_of(ge, struct mlx5_flow_group, entry);
5332 : :
5333 [ # # ]: 0 : if (dst_group_id) {
5334 : : /* Increase ref_cnt for new miss group. */
5335 : 0 : cfg.attr.flow_attr.group = dst_group_id;
5336 : 0 : ge = mlx5_hlist_register(priv->sh->groups, dst_group_id, &ctx);
5337 [ # # ]: 0 : if (!ge)
5338 : 0 : return -rte_errno;
5339 : :
5340 : : dst_grp = container_of(ge, struct mlx5_flow_group, entry);
5341 : :
5342 : 0 : cfg.attr.flow_attr.group = group_id;
5343 : 0 : ret = flow_hw_group_set_miss_group(dev, &cfg, src_grp, dst_grp, error);
5344 [ # # ]: 0 : if (ret)
5345 : 0 : goto error;
5346 : : } else {
5347 : 0 : return flow_hw_group_unset_miss_group(dev, src_grp, error);
5348 : : }
5349 : :
5350 : : return 0;
5351 : :
5352 : : error:
5353 : : if (dst_grp)
5354 : 0 : mlx5_hlist_unregister(priv->sh->groups, &dst_grp->entry);
5355 : 0 : return -rte_errno;
5356 : : }
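 : : /*
 : :  * Sketch of the matching public API call; groups 1 and 2 are
 : :  * illustrative application groups.
 : :  *
 : :  * @code{.c}
 : :  * struct rte_flow_group_attr grp_attr = { .ingress = 1 };
 : :  * struct rte_flow_action miss[] = {
 : :  *         { .type = RTE_FLOW_ACTION_TYPE_JUMP,
 : :  *           .conf = &(struct rte_flow_action_jump){ .group = 2 } },
 : :  *         { .type = RTE_FLOW_ACTION_TYPE_END },
 : :  * };
 : :  *
 : :  * // On miss in group 1, packets continue matching in group 2.
 : :  * rte_flow_group_set_miss_actions(port_id, 1, &grp_attr, miss, &error);
 : :  * @endcode
 : :  */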
5357 : :
5358 : : static bool
5359 : : flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
5360 : : enum rte_flow_field_id field)
5361 : : {
5362 [ # # # # : 0 : return action->src.field == field || action->dst.field == field;
# # # # #
# # # # #
# # # # #
# # # # #
# # # # ]
5363 : : }
5364 : :
5365 : : static bool
5366 : : flow_hw_modify_field_is_geneve_opt(enum rte_flow_field_id field)
5367 : : {
5368 : : return field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE ||
5369 : 0 : field == RTE_FLOW_FIELD_GENEVE_OPT_CLASS ||
5370 : : field == RTE_FLOW_FIELD_GENEVE_OPT_DATA;
5371 : : }
5372 : :
5373 : : static bool
5374 : 0 : flow_hw_modify_field_is_add_dst_valid(const struct rte_flow_action_modify_field *conf)
5375 : : {
5376 [ # # ]: 0 : if (conf->operation != RTE_FLOW_MODIFY_ADD)
5377 : : return true;
5378 [ # # ]: 0 : if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
5379 : : conf->src.field == RTE_FLOW_FIELD_VALUE)
5380 : : return true;
5381 [ # # ]: 0 : switch (conf->dst.field) {
5382 : : case RTE_FLOW_FIELD_IPV4_TTL:
5383 : : case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
5384 : : case RTE_FLOW_FIELD_TCP_SEQ_NUM:
5385 : : case RTE_FLOW_FIELD_TCP_ACK_NUM:
5386 : : case RTE_FLOW_FIELD_TAG:
5387 : : case RTE_FLOW_FIELD_META:
5388 : : case RTE_FLOW_FIELD_FLEX_ITEM:
5389 : : case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
5390 : : case RTE_FLOW_FIELD_IPV4_IHL:
5391 : : case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
5392 : : case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
5393 : : return true;
5394 : : default:
5395 : : break;
5396 : : }
5397 : 0 : return false;
5398 : : }
5399 : :
5400 : : /**
5401 : : * Validate the level value for modify field action.
5402 : : *
5403 : : * @param[in] data
5404 : : * Pointer to the rte_flow_field_data structure either src or dst.
5405 : : * @param[in] inner_supported
5406 : : * Indicator whether inner should be supported.
5407 : : * @param[out] error
5408 : : * Pointer to error structure.
5409 : : *
5410 : : * @return
5411 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5412 : : */
5413 : : static int
5414 : 0 : flow_hw_validate_modify_field_level(const struct rte_flow_field_data *data,
5415 : : bool inner_supported,
5416 : : struct rte_flow_error *error)
5417 : : {
5418 [ # # # # : 0 : switch ((int)data->field) {
# ]
5419 : : case RTE_FLOW_FIELD_START:
5420 : : case RTE_FLOW_FIELD_VLAN_TYPE:
5421 : : case RTE_FLOW_FIELD_RANDOM:
5422 : : case RTE_FLOW_FIELD_FLEX_ITEM:
5423 : : /*
5424 : : * The 'level' field is irrelevant here: the field is either not
5425 : : * supported or does not use 'level'.
5426 : : */
5427 : : break;
5428 : : case RTE_FLOW_FIELD_MARK:
5429 : : case RTE_FLOW_FIELD_META:
5430 : : case RTE_FLOW_FIELD_METER_COLOR:
5431 : : case RTE_FLOW_FIELD_HASH_RESULT:
5432 : : /* For meta data fields encapsulation level is don't-care. */
5433 : : break;
5434 : 0 : case RTE_FLOW_FIELD_TAG:
5435 : : case MLX5_RTE_FLOW_FIELD_META_REG:
5436 : : /*
5437 : : * The tag array for the RTE_FLOW_FIELD_TAG type is provided via the
5438 : : * 'tag_index' field. In the old API it was provided via the 'level'
5439 : : * field, which is still supported for backwards compatibility.
5440 : : * Therefore, for the meta tag field only, the level matters: it is
5441 : : * taken as the tag index when the 'tag_index' field isn't set, and
5442 : : * an error is returned when both are set.
5443 : : */
5444 [ # # ]: 0 : if (data->level > 0) {
5445 [ # # ]: 0 : if (data->tag_index > 0)
5446 : 0 : return rte_flow_error_set(error, EINVAL,
5447 : : RTE_FLOW_ERROR_TYPE_ACTION,
5448 : : data,
5449 : : "tag array can be provided using 'level' or 'tag_index' fields, not both");
5450 : 0 : DRV_LOG(WARNING,
5451 : : "tag array provided in 'level' field instead of 'tag_index' field.");
5452 : : }
5453 : : break;
5454 : 0 : case RTE_FLOW_FIELD_MAC_DST:
5455 : : case RTE_FLOW_FIELD_MAC_SRC:
5456 : : case RTE_FLOW_FIELD_MAC_TYPE:
5457 : : case RTE_FLOW_FIELD_IPV4_IHL:
5458 : : case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
5459 : : case RTE_FLOW_FIELD_IPV4_DSCP:
5460 : : case RTE_FLOW_FIELD_IPV4_ECN:
5461 : : case RTE_FLOW_FIELD_IPV4_TTL:
5462 : : case RTE_FLOW_FIELD_IPV4_SRC:
5463 : : case RTE_FLOW_FIELD_IPV4_DST:
5464 : : case RTE_FLOW_FIELD_IPV6_TRAFFIC_CLASS:
5465 : : case RTE_FLOW_FIELD_IPV6_FLOW_LABEL:
5466 : : case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
5467 : : case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
5468 : : case RTE_FLOW_FIELD_IPV6_SRC:
5469 : : case RTE_FLOW_FIELD_IPV6_DST:
5470 : : case RTE_FLOW_FIELD_TCP_PORT_SRC:
5471 : : case RTE_FLOW_FIELD_TCP_PORT_DST:
5472 : : case RTE_FLOW_FIELD_TCP_FLAGS:
5473 : : case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
5474 : : case RTE_FLOW_FIELD_UDP_PORT_SRC:
5475 : : case RTE_FLOW_FIELD_UDP_PORT_DST:
5476 [ # # ]: 0 : if (data->level > 2)
5477 : 0 : return rte_flow_error_set(error, ENOTSUP,
5478 : : RTE_FLOW_ERROR_TYPE_ACTION,
5479 : : data,
5480 : : "second inner header fields modification is not supported");
5481 [ # # ]: 0 : if (inner_supported)
5482 : : break;
5483 : : /* Fallthrough */
5484 : : case RTE_FLOW_FIELD_VLAN_ID:
5485 : : case RTE_FLOW_FIELD_IPV4_PROTO:
5486 : : case RTE_FLOW_FIELD_IPV6_PROTO:
5487 : : case RTE_FLOW_FIELD_IPV6_DSCP:
5488 : : case RTE_FLOW_FIELD_IPV6_ECN:
5489 : : case RTE_FLOW_FIELD_TCP_SEQ_NUM:
5490 : : case RTE_FLOW_FIELD_TCP_ACK_NUM:
5491 : : case RTE_FLOW_FIELD_ESP_PROTO:
5492 : : case RTE_FLOW_FIELD_ESP_SPI:
5493 : : case RTE_FLOW_FIELD_ESP_SEQ_NUM:
5494 : : case RTE_FLOW_FIELD_VXLAN_VNI:
5495 : : case RTE_FLOW_FIELD_GENEVE_VNI:
5496 : : case RTE_FLOW_FIELD_GENEVE_OPT_TYPE:
5497 : : case RTE_FLOW_FIELD_GENEVE_OPT_CLASS:
5498 : : case RTE_FLOW_FIELD_GENEVE_OPT_DATA:
5499 : : case RTE_FLOW_FIELD_GTP_TEID:
5500 : : case RTE_FLOW_FIELD_GTP_PSC_QFI:
5501 [ # # ]: 0 : if (data->level > 1)
5502 : 0 : return rte_flow_error_set(error, ENOTSUP,
5503 : : RTE_FLOW_ERROR_TYPE_ACTION,
5504 : : data,
5505 : : "inner header fields modification is not supported");
5506 : : break;
5507 : 0 : case RTE_FLOW_FIELD_MPLS:
5508 [ # # ]: 0 : if (data->level == 1)
5509 : 0 : return rte_flow_error_set(error, ENOTSUP,
5510 : : RTE_FLOW_ERROR_TYPE_ACTION,
5511 : : data,
5512 : : "outer MPLS header modification is not supported");
5513 [ # # ]: 0 : if (data->level > 2)
5514 : 0 : return rte_flow_error_set(error, ENOTSUP,
5515 : : RTE_FLOW_ERROR_TYPE_ACTION,
5516 : : data,
5517 : : "inner MPLS header modification is not supported");
5518 : : break;
5519 : 0 : case RTE_FLOW_FIELD_POINTER:
5520 : : case RTE_FLOW_FIELD_VALUE:
5521 : : default:
5522 : : MLX5_ASSERT(false);
5523 : : }
5524 : : return 0;
5525 : : }
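 : : /*
 : :  * For instance, a configuration this validation accepts: a sketch of a
 : :  * modify_field action copying 32 bits of metadata into tag register 2
 : :  * (the register index is illustrative).
 : :  *
 : :  * @code{.c}
 : :  * struct rte_flow_action_modify_field conf = {
 : :  *         .operation = RTE_FLOW_MODIFY_SET,
 : :  *         .dst = { .field = RTE_FLOW_FIELD_TAG, .tag_index = 2 },
 : :  *         .src = { .field = RTE_FLOW_FIELD_META },
 : :  *         .width = 32,
 : :  * };
 : :  * @endcode
 : :  */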
5526 : :
5527 : : static int
5528 : 0 : flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
5529 : : const struct rte_flow_action *action,
5530 : : const struct rte_flow_action *mask,
5531 : : struct rte_flow_error *error)
5532 : : {
5533 : 0 : const struct rte_flow_action_modify_field *action_conf = action->conf;
5534 : 0 : const struct rte_flow_action_modify_field *mask_conf = mask->conf;
5535 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5536 : 0 : struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
5537 : : int ret;
5538 : :
5539 [ # # ]: 0 : if (!mask_conf)
5540 : 0 : return rte_flow_error_set(error, EINVAL,
5541 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5542 : : "modify_field mask conf is missing");
5543 [ # # ]: 0 : if (action_conf->operation != mask_conf->operation)
5544 : 0 : return rte_flow_error_set(error, EINVAL,
5545 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5546 : : "modify_field operation mask and template are not equal");
5547 [ # # ]: 0 : if (action_conf->dst.field != mask_conf->dst.field)
5548 : 0 : return rte_flow_error_set(error, EINVAL,
5549 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5550 : : "destination field mask and template are not equal");
5551 : 0 : if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
5552 [ # # ]: 0 : action_conf->dst.field == RTE_FLOW_FIELD_VALUE ||
5553 : : action_conf->dst.field == RTE_FLOW_FIELD_HASH_RESULT)
5554 : 0 : return rte_flow_error_set(error, EINVAL,
5555 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5556 : : "immediate value, pointer and hash result cannot be used as destination");
5557 : 0 : ret = flow_hw_validate_modify_field_level(&action_conf->dst, false, error);
5558 [ # # ]: 0 : if (ret)
5559 : : return ret;
5560 [ # # # # ]: 0 : if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM &&
5561 : : !flow_hw_modify_field_is_geneve_opt(action_conf->dst.field)) {
5562 [ # # ]: 0 : if (action_conf->dst.tag_index &&
5563 : : !flow_modify_field_support_tag_array(action_conf->dst.field))
5564 : 0 : return rte_flow_error_set(error, EINVAL,
5565 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5566 : : "destination tag index is not supported");
5567 [ # # ]: 0 : if (action_conf->dst.class_id)
5568 : 0 : return rte_flow_error_set(error, EINVAL,
5569 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5570 : : "destination class id is not supported");
5571 : : }
5572 [ # # ]: 0 : if (mask_conf->dst.level != UINT8_MAX)
5573 : 0 : return rte_flow_error_set(error, EINVAL,
5574 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5575 : : "destination encapsulation level must be fully masked");
5576 [ # # ]: 0 : if (mask_conf->dst.offset != UINT32_MAX)
5577 : 0 : return rte_flow_error_set(error, EINVAL,
5578 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5579 : : "destination offset must be fully masked");
5580 [ # # ]: 0 : if (action_conf->src.field != mask_conf->src.field)
5581 : 0 : return rte_flow_error_set(error, EINVAL,
5582 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5583 : : "source field mask and template are not equal");
5584 [ # # ]: 0 : if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
5585 : : action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
5586 [ # # # # ]: 0 : if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM &&
5587 : : !flow_hw_modify_field_is_geneve_opt(action_conf->src.field)) {
5588 [ # # ]: 0 : if (action_conf->src.tag_index &&
5589 : : !flow_modify_field_support_tag_array(action_conf->src.field))
5590 : 0 : return rte_flow_error_set(error, EINVAL,
5591 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5592 : : "source tag index is not supported");
5593 [ # # ]: 0 : if (action_conf->src.class_id)
5594 : 0 : return rte_flow_error_set(error, EINVAL,
5595 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5596 : : "source class id is not supported");
5597 : : }
5598 [ # # ]: 0 : if (mask_conf->src.level != UINT8_MAX)
5599 : 0 : return rte_flow_error_set(error, EINVAL,
5600 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5601 : : "source encapsulation level must be fully masked");
5602 [ # # ]: 0 : if (mask_conf->src.offset != UINT32_MAX)
5603 : 0 : return rte_flow_error_set(error, EINVAL,
5604 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5605 : : "source offset must be fully masked");
5606 : 0 : ret = flow_hw_validate_modify_field_level(&action_conf->src, true, error);
5607 [ # # ]: 0 : if (ret)
5608 : : return ret;
5609 : : }
5610 [ # # ]: 0 : if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
5611 [ # # # # ]: 0 : action_conf->dst.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
5612 : 0 : action_conf->dst.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) ||
5613 [ # # ]: 0 : (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
5614 [ # # # # ]: 0 : action_conf->src.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
5615 : : action_conf->src.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX))
5616 : 0 : return rte_flow_error_set(error, EINVAL,
5617 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5618 : : "tag index is out of range");
5619 [ # # # # ]: 0 : if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
5620 [ # # # # ]: 0 : flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->dst.tag_index) == REG_NON) ||
5621 [ # # ]: 0 : (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
5622 [ # # ]: 0 : flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->src.tag_index) == REG_NON))
5623 : 0 : return rte_flow_error_set(error, EINVAL,
5624 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5625 : : "tag index is out of range");
5626 [ # # ]: 0 : if (mask_conf->width != UINT32_MAX)
5627 : 0 : return rte_flow_error_set(error, EINVAL,
5628 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5629 : : "modify_field width field must be fully masked");
5630 [ # # ]: 0 : if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
5631 : 0 : return rte_flow_error_set(error, EINVAL,
5632 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5633 : : "modifying arbitrary place in a packet is not supported");
5634 [ # # ]: 0 : if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
5635 : 0 : return rte_flow_error_set(error, EINVAL,
5636 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5637 : : "modifying vlan_type is not supported");
5638 [ # # ]: 0 : if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_RANDOM))
5639 : 0 : return rte_flow_error_set(error, EINVAL,
5640 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5641 : : "modifying random value is not supported");
5642 : : /*
5643 : : * Geneve VNI modification is supported only when the Geneve header is
5644 : : * parsed natively. When GENEVE options are supported, both the Geneve
5645 : : * and options headers are parsed by a flex parser.
5646 : : */
5647 [ # # ]: 0 : if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI) &&
5648 [ # # ]: 0 : attr->geneve_tlv_opt)
5649 : 0 : return rte_flow_error_set(error, EINVAL,
5650 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5651 : : "modifying Geneve VNI is not supported when GENEVE opt is supported");
5652 [ # # # # ]: 0 : if (priv->tlv_options == NULL &&
5653 [ # # ]: 0 : (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_TYPE) ||
5654 [ # # ]: 0 : flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_CLASS) ||
5655 : : flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_DATA)))
5656 : 0 : return rte_flow_error_set(error, EINVAL,
5657 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5658 : : "modifying Geneve TLV option is supported only after parser configuration");
5659 : : /* Due to an HW bug, the tunnel MPLS header is read-only. */
5660 [ # # ]: 0 : if (action_conf->dst.field == RTE_FLOW_FIELD_MPLS)
5661 : 0 : return rte_flow_error_set(error, EINVAL,
5662 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5663 : : "MPLS cannot be used as destination");
5664 : : /* The ADD operation is not supported for all destination fields. */
5665 [ # # ]: 0 : if (!flow_hw_modify_field_is_add_dst_valid(action_conf))
5666 : 0 : return rte_flow_error_set(error, EINVAL,
5667 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5668 : : "invalid add_field destination");
5669 : : return 0;
5670 : : }
5671 : :
5672 : : static int
5673 : 0 : flow_hw_validate_action_port_representor(struct rte_eth_dev *dev __rte_unused,
5674 : : const struct rte_flow_actions_template_attr *attr,
5675 : : const struct rte_flow_action *action,
5676 : : const struct rte_flow_action *mask,
5677 : : struct rte_flow_error *error)
5678 : : {
5679 : : const struct rte_flow_action_ethdev *action_conf = NULL;
5680 : : const struct rte_flow_action_ethdev *mask_conf = NULL;
5681 : :
5682 : : /* If transfer is set, port has been validated as proxy port. */
5683 [ # # ]: 0 : if (!attr->transfer)
5684 : 0 : return rte_flow_error_set(error, EINVAL,
5685 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5686 : : "cannot use port_representor actions"
5687 : : " without an E-Switch");
5688 [ # # ]: 0 : if (!action || !mask)
5689 : 0 : return rte_flow_error_set(error, EINVAL,
5690 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5691 : : "action and mask configuration must be set");
5692 : 0 : action_conf = action->conf;
5693 : 0 : mask_conf = mask->conf;
5694 [ # # # # : 0 : if (!mask_conf || mask_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR ||
# # ]
5695 [ # # ]: 0 : !action_conf || action_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
5696 : 0 : return rte_flow_error_set(error, EINVAL,
5697 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5698 : : "only eswitch manager port 0xffff is"
5699 : : " supported");
5700 : : return 0;
5701 : : }
5702 : :
5703 : : static int
5704 : 0 : flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
5705 : : const struct rte_flow_action *action,
5706 : : const struct rte_flow_action *mask,
5707 : : struct rte_flow_error *error)
5708 : : {
5709 : 0 : const struct rte_flow_action_ethdev *action_conf = action->conf;
5710 : 0 : const struct rte_flow_action_ethdev *mask_conf = mask->conf;
5711 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5712 : :
5713 [ # # ]: 0 : if (!priv->sh->config.dv_esw_en)
5714 : 0 : return rte_flow_error_set(error, EINVAL,
5715 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5716 : : "cannot use represented_port actions"
5717 : : " without an E-Switch");
5718 [ # # # # ]: 0 : if (mask_conf && mask_conf->port_id) {
5719 : : struct mlx5_priv *port_priv;
5720 : : struct mlx5_priv *dev_priv;
5721 : :
5722 [ # # ]: 0 : if (!action_conf)
5723 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5724 : : action, "port index was not provided");
5725 : 0 : port_priv = mlx5_port_to_eswitch_info(action_conf->port_id, false);
5726 [ # # ]: 0 : if (!port_priv)
5727 : 0 : return rte_flow_error_set(error, rte_errno,
5728 : : RTE_FLOW_ERROR_TYPE_ACTION,
5729 : : action,
5730 : : "failed to obtain E-Switch"
5731 : : " info for port");
5732 : 0 : dev_priv = mlx5_dev_to_eswitch_info(dev);
5733 [ # # ]: 0 : if (!dev_priv)
5734 : 0 : return rte_flow_error_set(error, rte_errno,
5735 : : RTE_FLOW_ERROR_TYPE_ACTION,
5736 : : action,
5737 : : "failed to obtain E-Switch"
5738 : : " info for transfer proxy");
5739 [ # # ]: 0 : if (port_priv->domain_id != dev_priv->domain_id)
5740 : 0 : return rte_flow_error_set(error, rte_errno,
5741 : : RTE_FLOW_ERROR_TYPE_ACTION,
5742 : : action,
5743 : : "cannot forward to port from"
5744 : : " a different E-Switch");
5745 : : }
5746 : : return 0;
5747 : : }
5748 : :
5749 : : /**
5750 : : * Validate AGE action.
5751 : : *
5752 : : * @param[in] dev
5753 : : * Pointer to rte_eth_dev structure.
5754 : : * @param[in] action
 5755 : :  *   Pointer to the AGE action.
5756 : : * @param[in] action_flags
5757 : : * Holds the actions detected until now.
5758 : : * @param[in] fixed_cnt
5759 : : * Indicator if this list has a fixed COUNT action.
5760 : : * @param[out] error
5761 : : * Pointer to error structure.
5762 : : *
5763 : : * @return
5764 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5765 : : */
5766 : : static int
5767 : 0 : flow_hw_validate_action_age(struct rte_eth_dev *dev,
5768 : : const struct rte_flow_action *action,
5769 : : uint64_t action_flags, bool fixed_cnt,
5770 : : struct rte_flow_error *error)
5771 : : {
5772 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5773 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
5774 : :
5775 [ # # ]: 0 : if (!priv->sh->cdev->config.devx)
5776 : 0 : return rte_flow_error_set(error, ENOTSUP,
5777 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5778 : : NULL, "AGE action not supported");
5779 [ # # ]: 0 : if (age_info->ages_ipool == NULL)
5780 : 0 : return rte_flow_error_set(error, EINVAL,
5781 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5782 : : "aging pool not initialized");
5783 [ # # ]: 0 : if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
5784 : : (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
5785 : 0 : return rte_flow_error_set(error, EINVAL,
5786 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5787 : : "duplicate AGE actions set");
5788 [ # # ]: 0 : if (fixed_cnt)
5789 : 0 : return rte_flow_error_set(error, EINVAL,
5790 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5791 : : "AGE and fixed COUNT combination is not supported");
5792 : : return 0;
5793 : : }
5794 : :
5795 : : /**
5796 : : * Validate count action.
5797 : : *
5798 : : * @param[in] dev
5799 : : * Pointer to rte_eth_dev structure.
5800 : : * @param[in] action
 5801 : :  *   Pointer to the COUNT action.
 5802 : :  * @param[in] mask
 5803 : :  *   Pointer to the COUNT action mask.
5804 : : * @param[in] action_flags
5805 : : * Holds the actions detected until now.
5806 : : * @param[out] error
5807 : : * Pointer to error structure.
5808 : : *
5809 : : * @return
5810 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5811 : : */
5812 : : static int
5813 : 0 : flow_hw_validate_action_count(struct rte_eth_dev *dev,
5814 : : const struct rte_flow_action *action,
5815 : : const struct rte_flow_action *mask,
5816 : : uint64_t action_flags,
5817 : : struct rte_flow_error *error)
5818 : : {
5819 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5820 : 0 : const struct rte_flow_action_count *count = mask->conf;
5821 : :
5822 [ # # ]: 0 : if (!priv->sh->cdev->config.devx)
5823 : 0 : return rte_flow_error_set(error, ENOTSUP,
5824 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5825 : : "count action not supported");
5826 [ # # ]: 0 : if (!priv->hws_cpool)
5827 : 0 : return rte_flow_error_set(error, EINVAL,
5828 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5829 : : "counters pool not initialized");
5830 [ # # ]: 0 : if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
5831 : : (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
5832 : 0 : return rte_flow_error_set(error, EINVAL,
5833 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5834 : : "duplicate count actions set");
5835 [ # # # # : 0 : if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
# # ]
5836 : 0 : return rte_flow_error_set(error, EINVAL,
5837 : : RTE_FLOW_ERROR_TYPE_ACTION, mask,
5838 : : "AGE and COUNT action shared by mask combination is not supported");
5839 : : return 0;
5840 : : }
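/*
 * Illustrative sketch (hypothetical MLX5_HW_DOC_EXAMPLE guard): COUNT can
 * coexist with AGE in a template only while the counter ID stays unmasked,
 * i.e. a dedicated counter is allocated per rule. A masked (fixed) ID would
 * name one counter shared across rules, which the checks above reject
 * alongside AGE.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static const struct rte_flow_action example_cnt_age_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT,
	  .conf = &(const struct rte_flow_action_count){ .id = 0 } },
	{ .type = RTE_FLOW_ACTION_TYPE_AGE,
	  .conf = &(const struct rte_flow_action_age){ .timeout = 10 } },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action example_cnt_age_masks[] = {
	/* .id left as 0 (unmasked) - per-rule counter, valid with AGE. */
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT,
	  .conf = &(const struct rte_flow_action_count){ .id = 0 } },
	{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = NULL },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif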
5841 : :
5842 : : /**
5843 : : * Validate meter_mark action.
5844 : : *
5845 : : * @param[in] dev
5846 : : * Pointer to rte_eth_dev structure.
5847 : : * @param[in] action
 5848 : :  *   Pointer to the METER_MARK action.
5849 : : * @param[in] indirect
5850 : : * If true, then provided action was passed using an indirect action.
5851 : : * @param[out] error
5852 : : * Pointer to error structure.
5853 : : *
5854 : : * @return
5855 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5856 : : */
5857 : : static int
5858 : 0 : flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
5859 : : const struct rte_flow_action *action,
5860 : : bool indirect,
5861 : : struct rte_flow_error *error)
5862 : : {
5863 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5864 : :
5865 : : RTE_SET_USED(action);
5866 : :
5867 [ # # ]: 0 : if (!priv->sh->cdev->config.devx)
5868 : 0 : return rte_flow_error_set(error, ENOTSUP,
5869 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5870 : : "meter_mark action not supported");
5871 [ # # # # ]: 0 : if (!indirect && priv->shared_host)
5872 : 0 : return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action,
5873 : : "meter_mark action can only be used on host port");
5874 [ # # ]: 0 : if (!priv->hws_mpool)
5875 : 0 : return rte_flow_error_set(error, EINVAL,
5876 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5877 : : "meter_mark pool not initialized");
5878 : : return 0;
5879 : : }
5880 : :
5881 : : /**
5882 : : * Validate indirect action.
5883 : : *
5884 : : * @param[in] dev
5885 : : * Pointer to rte_eth_dev structure.
5886 : : * @param[in] action
5887 : : * Pointer to the indirect action.
5888 : : * @param[in] mask
5889 : : * Pointer to the indirect action mask.
5890 : : * @param[in, out] action_flags
5891 : : * Holds the actions detected until now.
5892 : : * @param[in, out] fixed_cnt
5893 : : * Pointer to indicator if this list has a fixed COUNT action.
5894 : : * @param[out] error
5895 : : * Pointer to error structure.
5896 : : *
5897 : : * @return
5898 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5899 : : */
5900 : : static int
5901 : 0 : flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
5902 : : const struct rte_flow_action *action,
5903 : : const struct rte_flow_action *mask,
5904 : : uint64_t *action_flags, bool *fixed_cnt,
5905 : : struct rte_flow_error *error)
5906 : : {
5907 : : uint32_t type;
5908 : : int ret;
5909 : :
5910 [ # # ]: 0 : if (!mask)
5911 : 0 : return rte_flow_error_set(error, EINVAL,
5912 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5913 : : "Unable to determine indirect action type without a mask specified");
5914 : 0 : type = mask->type;
5915 [ # # # # : 0 : switch (type) {
# # # ]
5916 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
5917 : 0 : ret = flow_hw_validate_action_meter_mark(dev, mask, true, error);
5918 [ # # ]: 0 : if (ret < 0)
5919 : : return ret;
5920 : 0 : *action_flags |= MLX5_FLOW_ACTION_METER;
5921 : 0 : break;
5922 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
5923 : : /* TODO: Validation logic (same as flow_hw_actions_validate) */
5924 : 0 : *action_flags |= MLX5_FLOW_ACTION_RSS;
5925 : 0 : break;
5926 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
5927 : : /* TODO: Validation logic (same as flow_hw_actions_validate) */
5928 : 0 : *action_flags |= MLX5_FLOW_ACTION_CT;
5929 : 0 : break;
5930 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
5931 [ # # # # ]: 0 : if (action->conf && mask->conf) {
5932 [ # # ]: 0 : if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
5933 : : (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
5934 : : /*
 5935 : :                                  * AGE cannot use an indirect counter that is
 5936 : :                                  * shared with other flow rules.
5937 : : */
5938 : 0 : return rte_flow_error_set(error, EINVAL,
5939 : : RTE_FLOW_ERROR_TYPE_ACTION,
5940 : : NULL,
5941 : : "AGE and fixed COUNT combination is not supported");
5942 : 0 : *fixed_cnt = true;
5943 : : }
5944 : 0 : ret = flow_hw_validate_action_count(dev, action, mask,
5945 : : *action_flags, error);
5946 [ # # ]: 0 : if (ret < 0)
5947 : : return ret;
5948 : 0 : *action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
5949 : 0 : break;
5950 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
5951 : 0 : ret = flow_hw_validate_action_age(dev, action, *action_flags,
5952 : 0 : *fixed_cnt, error);
5953 [ # # ]: 0 : if (ret < 0)
5954 : : return ret;
5955 : 0 : *action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
5956 : 0 : break;
5957 : 0 : case RTE_FLOW_ACTION_TYPE_QUOTA:
5958 : : /* TODO: add proper quota verification */
5959 : 0 : *action_flags |= MLX5_FLOW_ACTION_QUOTA;
5960 : 0 : break;
5961 : 0 : default:
5962 : 0 : DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
5963 : 0 : return rte_flow_error_set(error, ENOTSUP,
5964 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
5965 : : "Unsupported indirect action type");
5966 : : }
5967 : : return 0;
5968 : : }
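/*
 * Illustrative sketch (hypothetical MLX5_HW_DOC_EXAMPLE guard; assumes the
 * usual template-API encoding of indirect actions): the action entry is
 * RTE_FLOW_ACTION_TYPE_INDIRECT while the mask entry's type names the
 * underlying action - exactly what the switch on mask->type above
 * dispatches on.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static const struct rte_flow_action example_indirect_action = {
	.type = RTE_FLOW_ACTION_TYPE_INDIRECT,
	.conf = NULL, /* the handle is supplied per rule at enqueue time */
};
static const struct rte_flow_action example_indirect_mask = {
	.type = RTE_FLOW_ACTION_TYPE_AGE, /* resolves the indirect type */
};
#endif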
5969 : :
5970 : : /**
5971 : : * Validate ipv6_ext_push action.
5972 : : *
5973 : : * @param[in] dev
5974 : : * Pointer to rte_eth_dev structure.
5975 : : * @param[in] action
 5976 : :  *   Pointer to the ipv6_ext_push action.
5977 : : * @param[out] error
5978 : : * Pointer to error structure.
5979 : : *
5980 : : * @return
5981 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5982 : : */
5983 : : static int
5984 : 0 : flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
5985 : : const struct rte_flow_action *action,
5986 : : struct rte_flow_error *error)
5987 : : {
5988 : 0 : const struct rte_flow_action_ipv6_ext_push *raw_push_data = action->conf;
5989 : :
5990 [ # # # # : 0 : if (!raw_push_data || !raw_push_data->size || !raw_push_data->data)
# # ]
5991 : 0 : return rte_flow_error_set(error, EINVAL,
5992 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5993 : : "invalid ipv6_ext_push data");
5994 [ # # # # ]: 0 : if (raw_push_data->type != IPPROTO_ROUTING ||
5995 : : raw_push_data->size > MLX5_PUSH_MAX_LEN)
5996 : 0 : return rte_flow_error_set(error, EINVAL,
5997 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
5998 : : "Unsupported ipv6_ext_push type or length");
5999 : : return 0;
6000 : : }
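/*
 * Illustrative sketch (hypothetical MLX5_HW_DOC_EXAMPLE guard): a valid
 * ipv6_ext_push configuration carries an IPPROTO_ROUTING header blob no
 * longer than MLX5_PUSH_MAX_LEN; the bytes below are placeholders only.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static uint8_t example_srh_bytes[8]; /* placeholder routing header */
static const struct rte_flow_action_ipv6_ext_push example_ext_push = {
	.data = example_srh_bytes,
	.size = sizeof(example_srh_bytes),
	.type = IPPROTO_ROUTING,
};
#endif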
6001 : :
6002 : : /**
6003 : : * Validate raw_encap action.
6004 : : *
 6005 : :  * @param[in] action
 6006 : :  *   Pointer to the raw_encap action.
 6007 : :  * @param[in] mask
 6008 : :  *   Pointer to the raw_encap action mask.
 6009 : :  * @param[out] error
 6010 : :  *   Pointer to error structure.
6011 : : *
6012 : : * @return
6013 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
6014 : : */
6015 : : static int
6016 : 0 : flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
6017 : : const struct rte_flow_action *mask,
6018 : : struct rte_flow_error *error)
6019 : : {
6020 : 0 : const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
6021 : 0 : const struct rte_flow_action_raw_encap *action_conf = action->conf;
6022 : :
6023 [ # # # # ]: 0 : if (!mask_conf || !mask_conf->size)
6024 : 0 : return rte_flow_error_set(error, EINVAL,
6025 : : RTE_FLOW_ERROR_TYPE_ACTION, mask,
6026 : : "raw_encap: size must be masked");
6027 [ # # # # ]: 0 : if (!action_conf || !action_conf->size)
6028 : 0 : return rte_flow_error_set(error, EINVAL,
6029 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6030 : : "raw_encap: invalid action configuration");
6031 [ # # # # ]: 0 : if (mask_conf->data && !action_conf->data)
6032 : 0 : return rte_flow_error_set(error, EINVAL,
6033 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6034 : : "raw_encap: masked data is missing");
6035 : : return 0;
6036 : : }
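/*
 * Illustrative sketch (hypothetical MLX5_HW_DOC_EXAMPLE guard): a template
 * raw_encap must mask the size, and when the data pointer is masked
 * (non-NULL) the action must provide the header bytes as well.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static uint8_t example_encap_hdr[14]; /* placeholder L2 header */
static const struct rte_flow_action_raw_encap example_encap = {
	.data = example_encap_hdr,
	.size = sizeof(example_encap_hdr),
};
static const struct rte_flow_action_raw_encap example_encap_mask = {
	.data = example_encap_hdr, /* non-NULL: data fixed in the template */
	.size = sizeof(example_encap_hdr), /* non-zero: size is masked */
};
#endif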
6037 : :
6038 : : /**
6039 : : * Process `... / raw_decap / raw_encap / ...` actions sequence.
6040 : : * The PMD handles the sequence as a single encap or decap reformat action,
6041 : : * depending on the raw_encap configuration.
6042 : : *
 6043 : :  * The function assumes that the raw_decap / raw_encap location in the
 6044 : :  * actions template list complies with the relative HWS actions order
 6045 : :  * required for each reformat configuration:
 6046 : :  *   ENCAP configuration must appear before [JUMP|DROP|PORT],
 6047 : :  *   DECAP configuration must appear at the template head.
6048 : : */
6049 : : static uint64_t
6050 : : mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
6051 : : uint32_t encap_ind, uint64_t flags)
6052 : : {
6053 : 0 : const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
6054 : :
6055 [ # # ]: 0 : if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
6056 : : return MLX5_FLOW_ACTION_ENCAP;
6057 [ # # ]: 0 : if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6058 : : return MLX5_FLOW_ACTION_ENCAP;
6059 : 0 : return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
6060 [ # # ]: 0 : MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
6061 : : }
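/*
 * Classification examples for the helper above:
 *   raw_encap alone                              -> ENCAP
 *   raw_decap + raw_encap, size >= decision size -> ENCAP (L2 to tunnel L3)
 *   raw_decap + raw_encap, size <  decision size -> DECAP (tunnel L3 to L2)
 * where the decision size is MLX5_ENCAPSULATION_DECISION_SIZE.
 */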
6062 : :
6063 : : enum mlx5_hw_indirect_list_relative_position {
6064 : : MLX5_INDIRECT_LIST_POSITION_UNKNOWN = -1,
6065 : : MLX5_INDIRECT_LIST_POSITION_BEFORE_MH = 0,
6066 : : MLX5_INDIRECT_LIST_POSITION_AFTER_MH,
6067 : : };
6068 : :
6069 : : static enum mlx5_hw_indirect_list_relative_position
6070 : 0 : mlx5_hw_indirect_list_mh_position(const struct rte_flow_action *action)
6071 : : {
6072 : 0 : const struct rte_flow_action_indirect_list *conf = action->conf;
6073 [ # # # # ]: 0 : enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(conf->handle);
6074 : : enum mlx5_hw_indirect_list_relative_position pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6075 : : const union {
6076 : : struct mlx5_indlst_legacy *legacy;
6077 : : struct mlx5_hw_encap_decap_action *reformat;
6078 : : struct rte_flow_action_list_handle *handle;
6079 : : } h = { .handle = conf->handle};
6080 : :
6081 [ # # # # ]: 0 : switch (list_type) {
6082 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
6083 [ # # # ]: 0 : switch (h.legacy->legacy_type) {
6084 : : case RTE_FLOW_ACTION_TYPE_AGE:
6085 : : case RTE_FLOW_ACTION_TYPE_COUNT:
6086 : : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6087 : : case RTE_FLOW_ACTION_TYPE_METER_MARK:
6088 : : case RTE_FLOW_ACTION_TYPE_QUOTA:
6089 : : pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6090 : : break;
6091 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
6092 : : pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6093 : 0 : break;
6094 : 0 : default:
6095 : : pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6096 : 0 : break;
6097 : : }
6098 : : break;
6099 : : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
6100 : : pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6101 : : break;
6102 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
6103 [ # # # ]: 0 : switch (h.reformat->action_type) {
6104 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
6105 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
6106 : : pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6107 : : break;
6108 : 0 : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
6109 : : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
6110 : : pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6111 : 0 : break;
6112 : 0 : default:
6113 : : pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6114 : 0 : break;
6115 : : }
6116 : : break;
6117 : 0 : default:
6118 : : pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6119 : 0 : break;
6120 : : }
6121 : 0 : return pos;
6122 : : }
6123 : :
6124 : : #define MLX5_HW_EXPAND_MH_FAILED 0xffff
6125 : :
6126 : : static inline uint16_t
6127 : 0 : flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
6128 : : struct rte_flow_action masks[],
6129 : : const struct rte_flow_action *mf_actions,
6130 : : const struct rte_flow_action *mf_masks,
6131 : : uint64_t flags, uint32_t act_num,
6132 : : uint32_t mf_num)
6133 : : {
6134 : : uint32_t i, tail;
6135 : :
6136 : : MLX5_ASSERT(actions && masks);
6137 : : MLX5_ASSERT(mf_num > 0);
6138 [ # # ]: 0 : if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
6139 : : /*
6140 : : * Application action template already has Modify Field.
 6141 : :                  * Its location will be used in DR.
6142 : : * Expanded MF action can be added before the END.
6143 : : */
6144 : 0 : i = act_num - 1;
6145 : 0 : goto insert;
6146 : : }
6147 : : /**
6148 : : * Locate the first action positioned BEFORE the new MF.
6149 : : *
6150 : : * Search for a place to insert modify header
6151 : : * from the END action backwards:
6152 : : * 1. END is always present in actions array
6153 : : * 2. END location is always at action[act_num - 1]
6154 : : * 3. END always positioned AFTER modify field location
6155 : : *
6156 : : * Relative actions order is the same for RX, TX and FDB.
6157 : : *
6158 : : * Current actions order (draft-3)
6159 : : * @see action_order_arr[]
6160 : : */
6161 [ # # ]: 0 : for (i = act_num - 2; (int)i >= 0; i--) {
6162 : : enum mlx5_hw_indirect_list_relative_position pos;
6163 : 0 : enum rte_flow_action_type type = actions[i].type;
6164 : : uint64_t reformat_type;
6165 : :
6166 [ # # ]: 0 : if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
6167 : 0 : type = masks[i].type;
6168 [ # # # # ]: 0 : switch (type) {
6169 : : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6170 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6171 : : case RTE_FLOW_ACTION_TYPE_DROP:
6172 : : case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
6173 : : case RTE_FLOW_ACTION_TYPE_JUMP:
6174 : : case RTE_FLOW_ACTION_TYPE_QUEUE:
6175 : : case RTE_FLOW_ACTION_TYPE_RSS:
6176 : : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
6177 : : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
6178 : : case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6179 : : case RTE_FLOW_ACTION_TYPE_VOID:
6180 : : case RTE_FLOW_ACTION_TYPE_END:
6181 : : break;
6182 : : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6183 : : reformat_type =
6184 : : mlx5_decap_encap_reformat_type(actions, i,
6185 : : flags);
6186 : : if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
6187 : 0 : i++;
6188 : 0 : goto insert;
6189 : : }
6190 [ # # ]: 0 : if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6191 : : i--;
6192 : : break;
6193 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
6194 : 0 : pos = mlx5_hw_indirect_list_mh_position(&actions[i]);
6195 [ # # ]: 0 : if (pos == MLX5_INDIRECT_LIST_POSITION_UNKNOWN)
6196 : : return MLX5_HW_EXPAND_MH_FAILED;
6197 [ # # ]: 0 : if (pos == MLX5_INDIRECT_LIST_POSITION_BEFORE_MH)
6198 : 0 : goto insert;
6199 : : break;
6200 : 0 : default:
6201 : 0 : i++; /* new MF inserted AFTER actions[i] */
6202 : 0 : goto insert;
6203 : : }
6204 : : }
6205 : : i = 0;
6206 : 0 : insert:
6207 : 0 : tail = act_num - i; /* num action to move */
6208 : 0 : memmove(actions + i + mf_num, actions + i, sizeof(actions[0]) * tail);
6209 : 0 : memcpy(actions + i, mf_actions, sizeof(actions[0]) * mf_num);
6210 : 0 : memmove(masks + i + mf_num, masks + i, sizeof(masks[0]) * tail);
6211 : : memcpy(masks + i, mf_masks, sizeof(masks[0]) * mf_num);
6212 : 0 : return i;
6213 : : }
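/*
 * Illustrative expansion: for the template { RAW_DECAP, RAW_ENCAP, JUMP,
 * END }, when the decap/encap pair classifies as a DECAP reformat (tunnel
 * header stripped), the synthetic MODIFY_FIELD lands right after it:
 * { RAW_DECAP, RAW_ENCAP, MODIFY_FIELD, JUMP, END }, returned index 2.
 * When the pair classifies as an ENCAP reformat, the MODIFY_FIELD is
 * placed before the pair instead, at index 0.
 */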
6214 : :
6215 : : static int
6216 : 0 : flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
6217 : : const
6218 : : struct rte_flow_actions_template_attr *attr,
6219 : : const struct rte_flow_action *action,
6220 : : const struct rte_flow_action *mask,
6221 : : struct rte_flow_error *error)
6222 : : {
6223 : : #define X_FIELD(ptr, t, f) (((ptr)->conf) && ((t *)((ptr)->conf))->f)
6224 : :
6225 : 0 : const bool masked_push =
6226 [ # # # # ]: 0 : X_FIELD(mask + MLX5_HW_VLAN_PUSH_TYPE_IDX,
6227 : : const struct rte_flow_action_of_push_vlan, ethertype);
6228 : : bool masked_param;
6229 : :
6230 : : /*
6231 : : * Mandatory actions order:
6232 : : * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
6233 : : */
6234 : : RTE_SET_USED(dev);
6235 : : RTE_SET_USED(attr);
 6236 : :         /* Check that the mask matches OF_PUSH_VLAN */
6237 [ # # ]: 0 : if (mask[MLX5_HW_VLAN_PUSH_TYPE_IDX].type !=
6238 : : RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
6239 : 0 : return rte_flow_error_set(error, EINVAL,
6240 : : RTE_FLOW_ERROR_TYPE_ACTION,
6241 : : action, "OF_PUSH_VLAN: mask does not match");
6242 : : /* Check that the second template and mask items are SET_VLAN_VID */
6243 [ # # ]: 0 : if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
6244 : 0 : RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID ||
6245 [ # # ]: 0 : mask[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
6246 : : RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
6247 : 0 : return rte_flow_error_set(error, EINVAL,
6248 : : RTE_FLOW_ERROR_TYPE_ACTION,
6249 : : action, "OF_PUSH_VLAN: invalid actions order");
6250 [ # # # # ]: 0 : masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_VID_IDX,
6251 : : const struct rte_flow_action_of_set_vlan_vid,
6252 : : vlan_vid);
6253 : : /*
 6254 : :          * PMD requires the OF_SET_VLAN_VID mask to match the OF_PUSH_VLAN mask
6255 : : */
6256 [ # # ]: 0 : if (masked_push ^ masked_param)
6257 : 0 : return rte_flow_error_set(error, EINVAL,
6258 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6259 : : "OF_SET_VLAN_VID: mask does not match OF_PUSH_VLAN");
6260 [ # # ]: 0 : if (is_of_vlan_pcp_present(action)) {
6261 [ # # ]: 0 : if (mask[MLX5_HW_VLAN_PUSH_PCP_IDX].type !=
6262 : : RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
6263 : 0 : return rte_flow_error_set(error, EINVAL,
6264 : : RTE_FLOW_ERROR_TYPE_ACTION,
6265 : : action, "OF_SET_VLAN_PCP: missing mask configuration");
6266 [ # # # # ]: 0 : masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_PCP_IDX,
6267 : : const struct
6268 : : rte_flow_action_of_set_vlan_pcp,
6269 : : vlan_pcp);
6270 : : /*
 6271 : :                  * PMD requires the OF_SET_VLAN_PCP mask to match the OF_PUSH_VLAN mask
6272 : : */
6273 [ # # ]: 0 : if (masked_push ^ masked_param)
6274 : 0 : return rte_flow_error_set(error, EINVAL,
6275 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6276 : : "OF_SET_VLAN_PCP: mask does not match OF_PUSH_VLAN");
6277 : : }
6278 : : return 0;
6279 : : #undef X_FIELD
6280 : : }
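/*
 * Illustrative sketch (hypothetical MLX5_HW_DOC_EXAMPLE guard): the
 * mandatory VLAN push sequence in a template. VID (and PCP, if present)
 * must be masked consistently with OF_PUSH_VLAN: either every parameter is
 * fixed in the template, as below with mirroring masks, or every one is
 * provided per rule.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static const struct rte_flow_action example_push_vlan_seq[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
	  .conf = &(const struct rte_flow_action_of_push_vlan){
		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN) } },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
	  .conf = &(const struct rte_flow_action_of_set_vlan_vid){
		.vlan_vid = RTE_BE16(100) } },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
	  .conf = &(const struct rte_flow_action_of_set_vlan_pcp){
		.vlan_pcp = 3 } },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif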
6281 : :
6282 : : static int
6283 : 0 : flow_hw_validate_action_default_miss(struct rte_eth_dev *dev,
6284 : : const struct rte_flow_actions_template_attr *attr,
6285 : : uint64_t action_flags,
6286 : : struct rte_flow_error *error)
6287 : : {
6288 : : /*
 6289 : :          * The private DEFAULT_MISS action is used internally for LACP in control
 6290 : :          * flows, so this validation could be skipped. It is kept for now since
 6291 : :          * it is performed only once, at template validation time.
6292 : : */
6293 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6294 : :
6295 [ # # ]: 0 : if (!attr->ingress || attr->egress || attr->transfer)
6296 : 0 : return rte_flow_error_set(error, EINVAL,
6297 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6298 : : "DEFAULT MISS is only supported in ingress.");
6299 [ # # ]: 0 : if (!priv->hw_def_miss)
6300 : 0 : return rte_flow_error_set(error, EINVAL,
6301 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6302 : : "DEFAULT MISS action does not exist.");
6303 [ # # ]: 0 : if (action_flags & MLX5_FLOW_FATE_ACTIONS)
6304 : 0 : return rte_flow_error_set(error, EINVAL,
6305 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6306 : : "DEFAULT MISS should be the only termination.");
6307 : : return 0;
6308 : : }
6309 : :
6310 : : static int
6311 : 0 : flow_hw_validate_action_nat64(struct rte_eth_dev *dev,
6312 : : const struct rte_flow_actions_template_attr *attr,
6313 : : const struct rte_flow_action *action,
6314 : : const struct rte_flow_action *mask,
6315 : : uint64_t action_flags,
6316 : : struct rte_flow_error *error)
6317 : : {
6318 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6319 : : const struct rte_flow_action_nat64 *nat64_c;
6320 : : enum rte_flow_nat64_type cov_type;
6321 : :
6322 : : RTE_SET_USED(action_flags);
6323 [ # # # # ]: 0 : if (mask->conf && ((const struct rte_flow_action_nat64 *)mask->conf)->type) {
6324 : 0 : nat64_c = (const struct rte_flow_action_nat64 *)action->conf;
6325 : 0 : cov_type = nat64_c->type;
6326 [ # # # # ]: 0 : if ((attr->ingress && !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][cov_type]) ||
6327 [ # # # # ]: 0 : (attr->egress && !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][cov_type]) ||
6328 [ # # # # ]: 0 : (attr->transfer && !priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][cov_type]))
6329 : 0 : goto err_out;
6330 : : } else {
6331 : : /*
 6332 : :                  * Usually, the action will be used in both directions. For non-masked
 6333 : :                  * actions, support for both directions is checked.
6334 : : */
6335 [ # # ]: 0 : if (attr->ingress)
6336 [ # # ]: 0 : if (!priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_6TO4] ||
6337 [ # # ]: 0 : !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_RX][RTE_FLOW_NAT64_4TO6])
6338 : 0 : goto err_out;
6339 [ # # ]: 0 : if (attr->egress)
6340 [ # # ]: 0 : if (!priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][RTE_FLOW_NAT64_6TO4] ||
6341 [ # # ]: 0 : !priv->action_nat64[MLX5DR_TABLE_TYPE_NIC_TX][RTE_FLOW_NAT64_4TO6])
6342 : 0 : goto err_out;
6343 [ # # ]: 0 : if (attr->transfer)
6344 [ # # ]: 0 : if (!priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][RTE_FLOW_NAT64_6TO4] ||
6345 [ # # ]: 0 : !priv->action_nat64[MLX5DR_TABLE_TYPE_FDB][RTE_FLOW_NAT64_4TO6])
6346 : 0 : goto err_out;
6347 : : }
6348 : : return 0;
6349 : 0 : err_out:
6350 : 0 : return rte_flow_error_set(error, EOPNOTSUPP, RTE_FLOW_ERROR_TYPE_ACTION,
6351 : : NULL, "NAT64 action is not supported.");
6352 : : }
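/*
 * Illustrative sketch (hypothetical MLX5_HW_DOC_EXAMPLE guard): a masked
 * NAT64 action pins the translation direction at template creation; an
 * unmasked one defers the choice to rule creation, which is why the
 * unmasked branch above requires both directions to be supported.
 */
#ifdef MLX5_HW_DOC_EXAMPLE
static const struct rte_flow_action_nat64 example_nat64_conf = {
	.type = RTE_FLOW_NAT64_6TO4, /* fixed IPv6-to-IPv4 translation */
};
#endif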
6353 : :
6354 : : static int
6355 : 0 : mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
6356 : : const struct rte_flow_actions_template_attr *attr,
6357 : : const struct rte_flow_action actions[],
6358 : : const struct rte_flow_action masks[],
6359 : : uint64_t *act_flags,
6360 : : struct rte_flow_error *error)
6361 : : {
6362 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6363 : : const struct rte_flow_action_count *count_mask = NULL;
6364 : 0 : bool fixed_cnt = false;
6365 : 0 : uint64_t action_flags = 0;
6366 : : bool actions_end = false;
6367 : : #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
6368 : : int table_type;
6369 : : #endif
6370 : : uint16_t i;
6371 : : int ret;
6372 : : const struct rte_flow_action_ipv6_ext_remove *remove_data;
6373 : :
6374 : : /* FDB actions are only valid to proxy port. */
6375 [ # # # # : 0 : if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
# # ]
6376 : 0 : return rte_flow_error_set(error, EINVAL,
6377 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6378 : : NULL,
6379 : : "transfer actions are only valid to proxy port");
6380 [ # # ]: 0 : for (i = 0; !actions_end; ++i) {
6381 : 0 : const struct rte_flow_action *action = &actions[i];
6382 : 0 : const struct rte_flow_action *mask = &masks[i];
6383 : :
6384 : : MLX5_ASSERT(i < MLX5_HW_MAX_ACTS);
6385 [ # # ]: 0 : if (action->type != RTE_FLOW_ACTION_TYPE_INDIRECT &&
6386 [ # # ]: 0 : action->type != mask->type)
6387 : 0 : return rte_flow_error_set(error, ENOTSUP,
6388 : : RTE_FLOW_ERROR_TYPE_ACTION,
6389 : : action,
6390 : : "mask type does not match action type");
6391 [ # # # # : 0 : switch ((int)action->type) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# ]
6392 : : case RTE_FLOW_ACTION_TYPE_VOID:
6393 : : break;
6394 : : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
6395 : : break;
6396 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
6397 : 0 : ret = flow_hw_validate_action_indirect(dev, action,
6398 : : mask,
6399 : : &action_flags,
6400 : : &fixed_cnt,
6401 : : error);
6402 [ # # ]: 0 : if (ret < 0)
6403 : 0 : return ret;
6404 : : break;
6405 : 0 : case RTE_FLOW_ACTION_TYPE_MARK:
6406 : : /* TODO: Validation logic */
6407 : 0 : action_flags |= MLX5_FLOW_ACTION_MARK;
6408 : 0 : break;
6409 : 0 : case RTE_FLOW_ACTION_TYPE_DROP:
6410 : : /* TODO: Validation logic */
6411 : 0 : action_flags |= MLX5_FLOW_ACTION_DROP;
6412 : 0 : break;
6413 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
6414 : : /* TODO: Validation logic */
6415 : 0 : action_flags |= MLX5_FLOW_ACTION_JUMP;
6416 : 0 : break;
6417 : : #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
6418 : : case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
6419 : : if (priv->shared_host)
6420 : : return rte_flow_error_set(error, ENOTSUP,
6421 : : RTE_FLOW_ERROR_TYPE_ACTION,
6422 : : action,
6423 : : "action not supported in guest port");
6424 : : table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
6425 : : ((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
6426 : : MLX5DR_TABLE_TYPE_FDB);
6427 : : if (!priv->hw_send_to_kernel[table_type])
6428 : : return rte_flow_error_set(error, ENOTSUP,
6429 : : RTE_FLOW_ERROR_TYPE_ACTION,
6430 : : action,
6431 : : "action is not available");
6432 : : action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
6433 : : break;
6434 : : #endif
6435 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
6436 : : /* TODO: Validation logic */
6437 : 0 : action_flags |= MLX5_FLOW_ACTION_QUEUE;
6438 : 0 : break;
6439 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
6440 : : /* TODO: Validation logic */
6441 : 0 : action_flags |= MLX5_FLOW_ACTION_RSS;
6442 : 0 : break;
6443 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6444 : : /* TODO: Validation logic */
6445 : 0 : action_flags |= MLX5_FLOW_ACTION_ENCAP;
6446 : 0 : break;
6447 : 0 : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6448 : : /* TODO: Validation logic */
6449 : 0 : action_flags |= MLX5_FLOW_ACTION_ENCAP;
6450 : 0 : break;
6451 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6452 : : /* TODO: Validation logic */
6453 : 0 : action_flags |= MLX5_FLOW_ACTION_DECAP;
6454 : 0 : break;
6455 : 0 : case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6456 : : /* TODO: Validation logic */
6457 : 0 : action_flags |= MLX5_FLOW_ACTION_DECAP;
6458 : 0 : break;
6459 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6460 : 0 : ret = flow_hw_validate_action_raw_encap(action, mask, error);
6461 [ # # ]: 0 : if (ret < 0)
6462 : 0 : return ret;
6463 : 0 : action_flags |= MLX5_FLOW_ACTION_ENCAP;
6464 : 0 : break;
6465 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6466 : : /* TODO: Validation logic */
6467 : 0 : action_flags |= MLX5_FLOW_ACTION_DECAP;
6468 : 0 : break;
6469 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
6470 : 0 : ret = flow_hw_validate_action_ipv6_ext_push(dev, action, error);
6471 [ # # ]: 0 : if (ret < 0)
6472 : 0 : return ret;
6473 : 0 : action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
6474 : 0 : break;
6475 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
6476 : 0 : remove_data = action->conf;
6477 : : /* Remove action must be shared. */
6478 [ # # # # ]: 0 : if (remove_data->type != IPPROTO_ROUTING || !mask) {
 6479 : 0 :                         DRV_LOG(ERR, "Only shared IPv6 routing remove is supported");
6480 : 0 : return -EINVAL;
6481 : : }
6482 : 0 : action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE;
6483 : 0 : break;
6484 : 0 : case RTE_FLOW_ACTION_TYPE_METER:
6485 : : /* TODO: Validation logic */
6486 : 0 : action_flags |= MLX5_FLOW_ACTION_METER;
6487 : 0 : break;
6488 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
6489 : 0 : ret = flow_hw_validate_action_meter_mark(dev, action, false, error);
6490 [ # # ]: 0 : if (ret < 0)
6491 : 0 : return ret;
6492 : 0 : action_flags |= MLX5_FLOW_ACTION_METER;
6493 : 0 : break;
6494 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6495 : 0 : ret = flow_hw_validate_action_modify_field(dev, action, mask,
6496 : : error);
6497 [ # # ]: 0 : if (ret < 0)
6498 : 0 : return ret;
6499 : 0 : action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
6500 : 0 : break;
6501 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
6502 : 0 : ret = flow_hw_validate_action_represented_port
6503 : : (dev, action, mask, error);
6504 [ # # ]: 0 : if (ret < 0)
6505 : 0 : return ret;
6506 : 0 : action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6507 : 0 : break;
6508 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
6509 : 0 : ret = flow_hw_validate_action_port_representor
6510 : : (dev, attr, action, mask, error);
6511 [ # # ]: 0 : if (ret < 0)
6512 : 0 : return ret;
6513 : 0 : action_flags |= MLX5_FLOW_ACTION_PORT_REPRESENTOR;
6514 : 0 : break;
6515 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
6516 [ # # # # ]: 0 : if (count_mask && count_mask->id)
6517 : 0 : fixed_cnt = true;
6518 : 0 : ret = flow_hw_validate_action_age(dev, action,
6519 : : action_flags,
6520 : : fixed_cnt, error);
6521 [ # # ]: 0 : if (ret < 0)
6522 : 0 : return ret;
6523 : 0 : action_flags |= MLX5_FLOW_ACTION_AGE;
6524 : 0 : break;
6525 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
6526 : 0 : ret = flow_hw_validate_action_count(dev, action, mask,
6527 : : action_flags,
6528 : : error);
6529 [ # # ]: 0 : if (ret < 0)
6530 : 0 : return ret;
6531 : 0 : count_mask = mask->conf;
6532 : 0 : action_flags |= MLX5_FLOW_ACTION_COUNT;
6533 : 0 : break;
6534 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6535 : : /* TODO: Validation logic */
6536 : 0 : action_flags |= MLX5_FLOW_ACTION_CT;
6537 : 0 : break;
6538 : 0 : case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
6539 : 0 : action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
6540 : 0 : break;
6541 : 0 : case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
6542 : 0 : action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
6543 : 0 : break;
6544 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6545 : 0 : ret = flow_hw_validate_action_push_vlan
6546 : : (dev, attr, action, mask, error);
6547 [ # # ]: 0 : if (ret != 0)
6548 : 0 : return ret;
6549 : 0 : i += is_of_vlan_pcp_present(action) ?
6550 [ # # ]: 0 : MLX5_HW_VLAN_PUSH_PCP_IDX :
6551 : : MLX5_HW_VLAN_PUSH_VID_IDX;
6552 : 0 : action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
6553 : 0 : break;
6554 : 0 : case RTE_FLOW_ACTION_TYPE_NAT64:
6555 : 0 : ret = flow_hw_validate_action_nat64(dev, attr, action, mask,
6556 : : action_flags, error);
6557 [ # # ]: 0 : if (ret != 0)
6558 : 0 : return ret;
6559 : 0 : action_flags |= MLX5_FLOW_ACTION_NAT64;
6560 : 0 : break;
6561 : 0 : case RTE_FLOW_ACTION_TYPE_END:
6562 : : actions_end = true;
6563 : 0 : break;
6564 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6565 : 0 : ret = flow_hw_validate_action_default_miss(dev, attr,
6566 : : action_flags, error);
6567 [ # # ]: 0 : if (ret < 0)
6568 : 0 : return ret;
6569 : 0 : action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
6570 : 0 : break;
6571 : 0 : default:
6572 : 0 : return rte_flow_error_set(error, ENOTSUP,
6573 : : RTE_FLOW_ERROR_TYPE_ACTION,
6574 : : action,
6575 : : "action not supported in template API");
6576 : : }
6577 : : }
6578 [ # # ]: 0 : if (act_flags != NULL)
6579 : 0 : *act_flags = action_flags;
6580 : : return 0;
6581 : : }
6582 : :
6583 : : static int
6584 : 0 : flow_hw_actions_validate(struct rte_eth_dev *dev,
6585 : : const struct rte_flow_actions_template_attr *attr,
6586 : : const struct rte_flow_action actions[],
6587 : : const struct rte_flow_action masks[],
6588 : : struct rte_flow_error *error)
6589 : : {
6590 : 0 : return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, error);
6591 : : }
6592 : :
6593 : :
6594 : : static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
6595 : : [RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
6596 : : [RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
6597 : : [RTE_FLOW_ACTION_TYPE_JUMP] = MLX5DR_ACTION_TYP_TBL,
6598 : : [RTE_FLOW_ACTION_TYPE_QUEUE] = MLX5DR_ACTION_TYP_TIR,
6599 : : [RTE_FLOW_ACTION_TYPE_RSS] = MLX5DR_ACTION_TYP_TIR,
6600 : : [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
6601 : : [RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
6602 : : [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
6603 : : [RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
6604 : : [RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
6605 : : [RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
6606 : : [RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = MLX5DR_ACTION_TYP_MISS,
6607 : : [RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
6608 : : [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
6609 : : [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
6610 : : [RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
6611 : : [RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
6612 : : [RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
6613 : : [RTE_FLOW_ACTION_TYPE_NAT64] = MLX5DR_ACTION_TYP_NAT64,
6614 : : };
6615 : :
6616 : : static inline void
6617 : : action_template_set_type(struct rte_flow_actions_template *at,
6618 : : enum mlx5dr_action_type *action_types,
6619 : : unsigned int action_src, uint16_t *curr_off,
6620 : : enum mlx5dr_action_type type)
6621 : : {
6622 : 0 : at->dr_off[action_src] = *curr_off;
6623 : 0 : action_types[*curr_off] = type;
6624 : 0 : *curr_off = *curr_off + 1;
6625 : 0 : }
6626 : :
6627 : : static int
6628 : 0 : flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
6629 : : enum mlx5dr_action_type *action_types,
6630 : : uint16_t *curr_off, uint16_t *cnt_off,
6631 : : struct rte_flow_actions_template *at)
6632 : : {
6633 [ # # # # : 0 : switch (type) {
# ]
6634 : : case RTE_FLOW_ACTION_TYPE_RSS:
6635 : : action_template_set_type(at, action_types, action_src, curr_off,
6636 : : MLX5DR_ACTION_TYP_TIR);
6637 : : break;
6638 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
6639 : : case RTE_FLOW_ACTION_TYPE_COUNT:
6640 : : /*
 6641 : :                  * Both AGE and COUNT actions need a counter; the first one encountered
 6642 : :                  * fills the action_types array, and the second only reuses the saved offset.
6643 : : */
6644 [ # # ]: 0 : if (*cnt_off == UINT16_MAX) {
6645 : 0 : *cnt_off = *curr_off;
6646 : : action_template_set_type(at, action_types,
6647 : : action_src, curr_off,
6648 : : MLX5DR_ACTION_TYP_CTR);
6649 : : }
6650 : 0 : at->dr_off[action_src] = *cnt_off;
6651 : 0 : break;
6652 : : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6653 : : action_template_set_type(at, action_types, action_src, curr_off,
6654 : : MLX5DR_ACTION_TYP_ASO_CT);
6655 : : break;
6656 : : case RTE_FLOW_ACTION_TYPE_QUOTA:
6657 : : case RTE_FLOW_ACTION_TYPE_METER_MARK:
6658 : : action_template_set_type(at, action_types, action_src, curr_off,
6659 : : MLX5DR_ACTION_TYP_ASO_METER);
6660 : : break;
6661 : 0 : default:
6662 : 0 : DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
6663 : 0 : return -EINVAL;
6664 : : }
6665 : : return 0;
6666 : : }
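/*
 * Example of the sharing above: a template holding both an indirect AGE
 * and an indirect COUNT appends a single MLX5DR_ACTION_TYP_CTR entry to
 * action_types[]; the second action only records the saved *cnt_off, so
 * both resolve to the same counter slot in the DR template.
 */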
6667 : :
6668 : :
6669 : : static int
6670 : 0 : flow_hw_template_actions_list(struct rte_flow_actions_template *at,
6671 : : unsigned int action_src,
6672 : : enum mlx5dr_action_type *action_types,
6673 : : uint16_t *curr_off, uint16_t *cnt_off)
6674 : : {
6675 : : int ret;
6676 : 0 : const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
6677 [ # # # # ]: 0 : enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
6678 : : const union {
6679 : : struct mlx5_indlst_legacy *legacy;
6680 : : struct rte_flow_action_list_handle *handle;
6681 : : } indlst_obj = { .handle = indlst_conf->handle };
6682 : : enum mlx5dr_action_type type;
6683 : :
6684 [ # # # # ]: 0 : switch (list_type) {
6685 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
6686 : 0 : ret = flow_hw_dr_actions_template_handle_shared
6687 : 0 : (indlst_obj.legacy->legacy_type, action_src,
6688 : : action_types, curr_off, cnt_off, at);
6689 [ # # ]: 0 : if (ret)
6690 : 0 : return ret;
6691 : : break;
6692 : : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
6693 : : action_template_set_type(at, action_types, action_src, curr_off,
6694 : : MLX5DR_ACTION_TYP_DEST_ARRAY);
6695 : : break;
6696 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
6697 : 0 : type = ((struct mlx5_hw_encap_decap_action *)
6698 : : (indlst_conf->handle))->action_type;
6699 : : action_template_set_type(at, action_types, action_src, curr_off, type);
6700 : : break;
6701 : 0 : default:
6702 : 0 : DRV_LOG(ERR, "Unsupported indirect list type");
6703 : 0 : return -EINVAL;
6704 : : }
6705 : : return 0;
6706 : : }
6707 : :
6708 : : /**
6709 : : * Create DR action template based on a provided sequence of flow actions.
6710 : : *
6711 : : * @param[in] dev
6712 : : * Pointer to the rte_eth_dev structure.
6713 : : * @param[in] at
6714 : : * Pointer to flow actions template to be updated.
6715 : : *
6716 : : * @return
6717 : : * DR action template pointer on success and action offsets in @p at are updated.
6718 : : * NULL otherwise.
6719 : : */
6720 : : static struct mlx5dr_action_template *
6721 : 0 : flow_hw_dr_actions_template_create(struct rte_eth_dev *dev,
6722 : : struct rte_flow_actions_template *at)
6723 : : {
6724 : : struct mlx5dr_action_template *dr_template;
6725 : 0 : enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { MLX5DR_ACTION_TYP_LAST };
6726 : : unsigned int i;
6727 : : uint16_t curr_off;
6728 : : enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
6729 : : uint16_t reformat_off = UINT16_MAX;
6730 : : uint16_t mhdr_off = UINT16_MAX;
6731 : : uint16_t recom_off = UINT16_MAX;
6732 : 0 : uint16_t cnt_off = UINT16_MAX;
6733 : : enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
6734 : : int ret;
6735 : :
6736 [ # # ]: 0 : for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
6737 : : const struct rte_flow_action_raw_encap *raw_encap_data;
6738 : : size_t data_size;
6739 : : enum mlx5dr_action_type type;
6740 : :
6741 [ # # ]: 0 : if (curr_off >= MLX5_HW_MAX_ACTS)
6742 : 0 : goto err_actions_num;
6743 [ # # # # : 0 : switch ((int)at->actions[i].type) {
# # # # #
# # # # #
# ]
6744 : : case RTE_FLOW_ACTION_TYPE_VOID:
6745 : : break;
6746 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
6747 : 0 : ret = flow_hw_template_actions_list(at, i, action_types,
6748 : : &curr_off, &cnt_off);
6749 [ # # ]: 0 : if (ret)
6750 : : return NULL;
6751 : : break;
6752 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
6753 : 0 : ret = flow_hw_dr_actions_template_handle_shared
6754 : 0 : (at->masks[i].type, i, action_types,
6755 : : &curr_off, &cnt_off, at);
6756 [ # # ]: 0 : if (ret)
6757 : : return NULL;
6758 : : break;
6759 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
6760 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
6761 : : case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
6762 : : case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
6763 : : MLX5_ASSERT(reformat_off == UINT16_MAX);
6764 : 0 : reformat_off = curr_off++;
6765 : 0 : reformat_act_type = mlx5_hw_dr_action_types[at->actions[i].type];
6766 : 0 : break;
6767 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
6768 : : MLX5_ASSERT(recom_off == UINT16_MAX);
6769 : : recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
6770 : 0 : recom_off = curr_off++;
6771 : 0 : break;
6772 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
6773 : : MLX5_ASSERT(recom_off == UINT16_MAX);
6774 : : recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
6775 : 0 : recom_off = curr_off++;
6776 : 0 : break;
6777 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
6778 : 0 : raw_encap_data = at->actions[i].conf;
6779 : 0 : data_size = raw_encap_data->size;
6780 [ # # ]: 0 : if (reformat_off != UINT16_MAX) {
6781 : : reformat_act_type = data_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
6782 [ # # ]: 0 : MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
6783 : : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
6784 : : } else {
6785 : 0 : reformat_off = curr_off++;
6786 : : reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
6787 : : }
6788 : : break;
6789 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
6790 : 0 : reformat_off = curr_off++;
6791 : : reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
6792 : 0 : break;
6793 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
6794 [ # # ]: 0 : if (mhdr_off == UINT16_MAX) {
6795 : 0 : mhdr_off = curr_off++;
6796 : 0 : type = mlx5_hw_dr_action_types[at->actions[i].type];
6797 : 0 : action_types[mhdr_off] = type;
6798 : : }
6799 : : break;
6800 : 0 : case RTE_FLOW_ACTION_TYPE_METER:
6801 : 0 : at->dr_off[i] = curr_off;
6802 : 0 : action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
6803 [ # # ]: 0 : if (curr_off >= MLX5_HW_MAX_ACTS)
6804 : 0 : goto err_actions_num;
6805 : 0 : action_types[curr_off++] = MLX5DR_ACTION_TYP_TBL;
6806 : 0 : break;
6807 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
6808 : 0 : type = mlx5_hw_dr_action_types[at->actions[i].type];
6809 : 0 : at->dr_off[i] = curr_off;
6810 : 0 : action_types[curr_off++] = type;
6811 : 0 : i += is_of_vlan_pcp_present(at->actions + i) ?
6812 [ # # ]: 0 : MLX5_HW_VLAN_PUSH_PCP_IDX :
6813 : : MLX5_HW_VLAN_PUSH_VID_IDX;
6814 : 0 : break;
6815 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
6816 : 0 : at->dr_off[i] = curr_off;
6817 : 0 : action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
6818 [ # # ]: 0 : if (curr_off >= MLX5_HW_MAX_ACTS)
6819 : 0 : goto err_actions_num;
6820 : : break;
6821 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
6822 : : case RTE_FLOW_ACTION_TYPE_COUNT:
6823 : : /*
 6824 : :                          * Both AGE and COUNT actions need a counter; the first
 6825 : :                          * one encountered fills the action_types array, and the
 6826 : :                          * second one only reuses the saved offset.
6827 : : */
6828 [ # # ]: 0 : if (cnt_off == UINT16_MAX) {
6829 : 0 : cnt_off = curr_off++;
6830 : 0 : action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
6831 : : }
6832 : 0 : at->dr_off[i] = cnt_off;
6833 : 0 : break;
6834 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
6835 : 0 : at->dr_off[i] = curr_off;
6836 : 0 : action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS;
6837 : 0 : break;
6838 : 0 : default:
6839 : 0 : type = mlx5_hw_dr_action_types[at->actions[i].type];
6840 : 0 : at->dr_off[i] = curr_off;
6841 : 0 : action_types[curr_off++] = type;
6842 : 0 : break;
6843 : : }
6844 : : }
6845 [ # # ]: 0 : if (curr_off >= MLX5_HW_MAX_ACTS)
6846 : 0 : goto err_actions_num;
6847 [ # # ]: 0 : if (mhdr_off != UINT16_MAX)
6848 : 0 : at->mhdr_off = mhdr_off;
6849 [ # # ]: 0 : if (reformat_off != UINT16_MAX) {
6850 : 0 : at->reformat_off = reformat_off;
6851 : 0 : action_types[reformat_off] = reformat_act_type;
6852 : : }
6853 [ # # ]: 0 : if (recom_off != UINT16_MAX) {
6854 : 0 : at->recom_off = recom_off;
6855 : 0 : action_types[recom_off] = recom_type;
6856 : : }
6857 : 0 : dr_template = mlx5dr_action_template_create(action_types, 0);
6858 [ # # ]: 0 : if (dr_template) {
6859 : 0 : at->dr_actions_num = curr_off;
6860 : : } else {
6861 : 0 : DRV_LOG(ERR, "Failed to create DR action template: %d", rte_errno);
6862 : 0 : return NULL;
6863 : : }
 6864 : :         /* Create the SRH flex parser needed by IPv6 routing extension push/pop. */
6865 [ # # ]: 0 : if ((recom_type == MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT ||
6866 [ # # ]: 0 : recom_type == MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) &&
6867 : 0 : mlx5_alloc_srh_flex_parser(dev)) {
6868 : 0 : DRV_LOG(ERR, "Failed to create srv6 flex parser");
6869 : 0 : claim_zero(mlx5dr_action_template_destroy(dr_template));
6870 : 0 : return NULL;
6871 : : }
6872 : : return dr_template;
6873 : 0 : err_actions_num:
6874 : 0 : DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed in template",
6875 : : curr_off, MLX5_HW_MAX_ACTS);
6876 : 0 : return NULL;
6877 : : }
6878 : :
6879 : : static void
6880 : 0 : flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
6881 : : struct rte_flow_action *ra,
6882 : : struct rte_flow_action *rm,
6883 : : struct rte_flow_action_modify_field *spec,
6884 : : struct rte_flow_action_modify_field *mask,
6885 : : int set_vlan_vid_ix)
6886 : : {
6887 : : struct rte_flow_error error;
6888 [ # # ]: 0 : const bool masked = rm[set_vlan_vid_ix].conf &&
6889 : : (((const struct rte_flow_action_of_set_vlan_vid *)
6890 [ # # ]: 0 : rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
6891 : 0 : const struct rte_flow_action_of_set_vlan_vid *conf =
6892 : 0 : ra[set_vlan_vid_ix].conf;
6893 : 0 : int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
6894 : : NULL, &error);
6895 : 0 : *spec = (typeof(*spec)) {
6896 : : .operation = RTE_FLOW_MODIFY_SET,
6897 : : .dst = {
6898 : : .field = RTE_FLOW_FIELD_VLAN_ID,
6899 : : .level = 0, .offset = 0,
6900 : : },
6901 : : .src = {
6902 : : .field = RTE_FLOW_FIELD_VALUE,
6903 : : },
6904 : : .width = width,
6905 : : };
6906 : 0 : *mask = (typeof(*mask)) {
6907 : : .operation = RTE_FLOW_MODIFY_SET,
6908 : : .dst = {
6909 : : .field = RTE_FLOW_FIELD_VLAN_ID,
6910 : : .level = 0xff, .offset = 0xffffffff,
6911 : : },
6912 : : .src = {
6913 : : .field = RTE_FLOW_FIELD_VALUE,
6914 : : },
6915 : : .width = 0xffffffff,
6916 : : };
6917 [ # # ]: 0 : if (masked) {
6918 : 0 : uint32_t mask_val = 0xffffffff;
6919 : :
6920 [ # # ]: 0 : rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid));
6921 [ # # ]: 0 : rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val));
6922 : : }
6923 : 0 : ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
6924 : 0 : ra[set_vlan_vid_ix].conf = spec;
6925 : 0 : rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
6926 : 0 : rm[set_vlan_vid_ix].conf = mask;
6927 : 0 : }
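/*
 * Illustrative effect of the helper above: the template pair
 *   action: OF_SET_VLAN_VID { .vlan_vid = RTE_BE16(100) }
 *   mask:   OF_SET_VLAN_VID { .vlan_vid = RTE_BE16(0xfff) }
 * is rewritten in place into a MODIFY_FIELD action that SETs
 * RTE_FLOW_FIELD_VLAN_ID from an immediate value of matching width.
 */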
6928 : :
6929 : : static __rte_always_inline int
6930 : : flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
6931 : : struct mlx5_modification_cmd *mhdr_cmd,
6932 : : struct mlx5_action_construct_data *act_data,
6933 : : const struct mlx5_hw_actions *hw_acts,
6934 : : const struct rte_flow_action *action)
6935 : : {
6936 : : struct rte_flow_error error;
6937 : 0 : rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
6938 : 0 : action->conf)->vlan_vid;
6939 : 0 : int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
6940 : : NULL, &error);
6941 [ # # # # : 0 : struct rte_flow_action_modify_field conf = {
# # ]
6942 : : .operation = RTE_FLOW_MODIFY_SET,
6943 : : .dst = {
6944 : : .field = RTE_FLOW_FIELD_VLAN_ID,
6945 : : .level = 0, .offset = 0,
6946 : : },
6947 : : .src = {
6948 : : .field = RTE_FLOW_FIELD_VALUE,
6949 : : },
6950 : : .width = width,
6951 : : };
6952 : : struct rte_flow_action modify_action = {
6953 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
6954 : : .conf = &conf
6955 : : };
6956 : :
6957 : : rte_memcpy(conf.src.value, &vid, sizeof(vid));
6958 : : return flow_hw_modify_field_construct(mhdr_cmd, act_data, hw_acts, &modify_action);
6959 : : }
6960 : :
6961 : : static int
6962 : 0 : flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
6963 : : struct rte_flow_item_flex_handle *handle,
6964 : : uint8_t *flex_item)
6965 : : {
6966 : 0 : int index = mlx5_flex_acquire_index(dev, handle, false);
6967 : :
6968 : : MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
6969 [ # # ]: 0 : if (index < 0)
6970 : : return -1;
6971 [ # # ]: 0 : if (!(*flex_item & RTE_BIT32(index))) {
6972 : : /* Don't count same flex item again. */
6973 : 0 : if (mlx5_flex_acquire_index(dev, handle, true) != index)
6974 : : MLX5_ASSERT(false);
6975 : 0 : *flex_item |= (uint8_t)RTE_BIT32(index);
6976 : : }
6977 : : return 0;
6978 : : }
6979 : :
6980 : : static void
6981 : 0 : flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
6982 : : {
6983 [ # # ]: 0 : while (*flex_item) {
6984 : 0 : int index = rte_bsf32(*flex_item);
6985 : :
6986 : 0 : mlx5_flex_release_index(dev, index);
6987 : 0 : *flex_item &= ~(uint8_t)RTE_BIT32(index);
6988 : : }
6989 : 0 : }
6990 : : static __rte_always_inline void
6991 : : flow_hw_actions_template_replace_container(const
6992 : : struct rte_flow_action *actions,
6993 : : const
6994 : : struct rte_flow_action *masks,
6995 : : struct rte_flow_action *new_actions,
6996 : : struct rte_flow_action *new_masks,
6997 : : struct rte_flow_action **ra,
6998 : : struct rte_flow_action **rm,
6999 : : uint32_t act_num)
7000 : : {
7001 : 0 : memcpy(new_actions, actions, sizeof(actions[0]) * act_num);
7002 : : memcpy(new_masks, masks, sizeof(masks[0]) * act_num);
7003 : : *ra = (void *)(uintptr_t)new_actions;
7004 : : *rm = (void *)(uintptr_t)new_masks;
7005 : 0 : }
7006 : :
7007 : : /* Action template copies these actions in rte_flow_conv() */
7008 : :
7009 : : static const struct rte_flow_action rx_meta_copy_action = {
7010 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7011 : : .conf = &(struct rte_flow_action_modify_field){
7012 : : .operation = RTE_FLOW_MODIFY_SET,
7013 : : .dst = {
7014 : : .field = (enum rte_flow_field_id)
7015 : : MLX5_RTE_FLOW_FIELD_META_REG,
7016 : : .tag_index = REG_B,
7017 : : },
7018 : : .src = {
7019 : : .field = (enum rte_flow_field_id)
7020 : : MLX5_RTE_FLOW_FIELD_META_REG,
7021 : : .tag_index = REG_C_1,
7022 : : },
7023 : : .width = 32,
7024 : : }
7025 : : };
7026 : :
7027 : : static const struct rte_flow_action rx_meta_copy_mask = {
7028 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7029 : : .conf = &(struct rte_flow_action_modify_field){
7030 : : .operation = RTE_FLOW_MODIFY_SET,
7031 : : .dst = {
7032 : : .field = (enum rte_flow_field_id)
7033 : : MLX5_RTE_FLOW_FIELD_META_REG,
7034 : : .level = UINT8_MAX,
7035 : : .tag_index = UINT8_MAX,
7036 : : .offset = UINT32_MAX,
7037 : : },
7038 : : .src = {
7039 : : .field = (enum rte_flow_field_id)
7040 : : MLX5_RTE_FLOW_FIELD_META_REG,
7041 : : .level = UINT8_MAX,
7042 : : .tag_index = UINT8_MAX,
7043 : : .offset = UINT32_MAX,
7044 : : },
7045 : : .width = UINT32_MAX,
7046 : : }
7047 : : };
7048 : :
7049 : : static const struct rte_flow_action quota_color_inc_action = {
7050 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7051 : : .conf = &(struct rte_flow_action_modify_field) {
7052 : : .operation = RTE_FLOW_MODIFY_ADD,
7053 : : .dst = {
7054 : : .field = RTE_FLOW_FIELD_METER_COLOR,
7055 : : .level = 0, .offset = 0
7056 : : },
7057 : : .src = {
7058 : : .field = RTE_FLOW_FIELD_VALUE,
7059 : : .level = 1,
7060 : : .offset = 0,
7061 : : },
7062 : : .width = 2
7063 : : }
7064 : : };
7065 : :
7066 : : static const struct rte_flow_action quota_color_inc_mask = {
7067 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7068 : : .conf = &(struct rte_flow_action_modify_field) {
7069 : : .operation = RTE_FLOW_MODIFY_ADD,
7070 : : .dst = {
7071 : : .field = RTE_FLOW_FIELD_METER_COLOR,
7072 : : .level = UINT8_MAX,
7073 : : .tag_index = UINT8_MAX,
7074 : : .offset = UINT32_MAX,
7075 : : },
7076 : : .src = {
7077 : : .field = RTE_FLOW_FIELD_VALUE,
7078 : : .level = 3,
7079 : : .offset = 0
7080 : : },
7081 : : .width = UINT32_MAX
7082 : : }
7083 : : };
7084 : :
7085 : : /**
7086 : : * Create flow action template.
7087 : : *
7088 : : * @param[in] dev
7089 : : * Pointer to the rte_eth_dev structure.
7090 : : * @param[in] attr
7091 : : * Pointer to the action template attributes.
7092 : : * @param[in] actions
7093 : : * Associated actions (list terminated by the END action).
7094 : : * @param[in] masks
7095 : : * List of actions that marks which of the action's member is constant.
7096 : : * @param[out] error
7097 : : * Pointer to error structure.
7098 : : *
7099 : : * @return
7100 : : * Action template pointer on success, NULL otherwise and rte_errno is set.
7101 : : */
7102 : : static struct rte_flow_actions_template *
7103 : 0 : flow_hw_actions_template_create(struct rte_eth_dev *dev,
7104 : : const struct rte_flow_actions_template_attr *attr,
7105 : : const struct rte_flow_action actions[],
7106 : : const struct rte_flow_action masks[],
7107 : : struct rte_flow_error *error)
7108 : : {
7109 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7110 : : int len, act_len, mask_len;
7111 : : unsigned int act_num;
7112 : : unsigned int i;
7113 : : struct rte_flow_actions_template *at = NULL;
7114 : : uint16_t pos = UINT16_MAX;
7115 : 0 : uint64_t action_flags = 0;
7116 : : struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
7117 : : struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
7118 : : struct rte_flow_action *ra = (void *)(uintptr_t)actions;
7119 : : struct rte_flow_action *rm = (void *)(uintptr_t)masks;
7120 : : int set_vlan_vid_ix = -1;
7121 : 0 : struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
7122 : 0 : struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
7123 : : struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
7124 : : struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
7125 : : uint32_t expand_mf_num = 0;
7126 : 0 : uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
7127 : :
7128 [ # # ]: 0 : if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
7129 : : &action_flags, error))
7130 : : return NULL;
7131 [ # # ]: 0 : for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
7132 [ # # # ]: 0 : switch (ra[i].type) {
7133 : : /* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
7134 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7135 : 0 : i += is_of_vlan_pcp_present(ra + i) ?
7136 [ # # ]: 0 : MLX5_HW_VLAN_PUSH_PCP_IDX :
7137 : : MLX5_HW_VLAN_PUSH_VID_IDX;
7138 : 0 : break;
7139 : 0 : case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7140 : 0 : set_vlan_vid_ix = i;
7141 : 0 : break;
7142 : : default:
7143 : : break;
7144 : : }
7145 : : }
7146 : : /*
7147 : : * Count flow actions to allocate required space for storing DR offsets and to check
7148 : : * if temporary buffer would not be overrun.
 7149 : :          * that the temporary buffer is not overrun.
7150 : 0 : act_num = i + 1;
7151 [ # # ]: 0 : if (act_num >= MLX5_HW_MAX_ACTS) {
7152 : 0 : rte_flow_error_set(error, EINVAL,
7153 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
7154 : 0 : return NULL;
7155 : : }
7156 [ # # ]: 0 : if (set_vlan_vid_ix != -1) {
7157 : : /* If temporary action buffer was not used, copy template actions to it */
7158 : : if (ra == actions)
7159 : : flow_hw_actions_template_replace_container(actions,
7160 : : masks,
7161 : : tmp_action,
7162 : : tmp_mask,
7163 : : &ra, &rm,
7164 : : act_num);
7165 : 0 : flow_hw_set_vlan_vid(dev, ra, rm,
7166 : : &set_vlan_vid_spec, &set_vlan_vid_mask,
7167 : : set_vlan_vid_ix);
7168 : 0 : action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7169 : : }
7170 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_QUOTA) {
7171 : 0 : mf_actions[expand_mf_num] = quota_color_inc_action;
7172 : 0 : mf_masks[expand_mf_num] = quota_color_inc_mask;
7173 : : expand_mf_num++;
7174 : : }
7175 [ # # ]: 0 : if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
7176 : 0 : priv->sh->config.dv_esw_en &&
7177 [ # # ]: 0 : (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
7178 : : /* Insert META copy */
7179 : 0 : mf_actions[expand_mf_num] = rx_meta_copy_action;
7180 : 0 : mf_masks[expand_mf_num] = rx_meta_copy_mask;
7181 : 0 : expand_mf_num++;
7182 : : }
7183 [ # # ]: 0 : if (expand_mf_num) {
7184 [ # # ]: 0 : if (act_num + expand_mf_num > MLX5_HW_MAX_ACTS) {
7185 : 0 : rte_flow_error_set(error, E2BIG,
7186 : : RTE_FLOW_ERROR_TYPE_ACTION,
7187 : : NULL, "cannot expand: too many actions");
7188 : 0 : return NULL;
7189 : : }
7190 [ # # ]: 0 : if (ra == actions)
7191 : : flow_hw_actions_template_replace_container(actions,
7192 : : masks,
7193 : : tmp_action,
7194 : : tmp_mask,
7195 : : &ra, &rm,
7196 : : act_num);
7197 : : /* The application must ensure only one Q/RSS action exists in one rule. */
7198 : 0 : pos = flow_hw_template_expand_modify_field(ra, rm,
7199 : : mf_actions,
7200 : : mf_masks,
7201 : : action_flags,
7202 : : act_num,
7203 : : expand_mf_num);
7204 [ # # ]: 0 : if (pos == MLX5_HW_EXPAND_MH_FAILED) {
7205 : 0 : rte_flow_error_set(error, ENOMEM,
7206 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7207 : : NULL, "modify header expansion failed");
7208 : 0 : return NULL;
7209 : : }
7210 : : act_num += expand_mf_num;
7211 [ # # ]: 0 : for (i = pos + expand_mf_num; i < act_num; i++)
7212 : 0 : src_off[i] += expand_mf_num;
7213 : 0 : action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7214 : : }
7215 : 0 : act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
7216 [ # # ]: 0 : if (act_len <= 0)
7217 : : return NULL;
7218 : 0 : len = RTE_ALIGN(act_len, 16);
7219 : 0 : mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);
7220 [ # # ]: 0 : if (mask_len <= 0)
7221 : : return NULL;
7222 : 0 : len += RTE_ALIGN(mask_len, 16);
7223 : 0 : len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
7224 : 0 : len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
7225 : 0 : at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
7226 : 0 : RTE_CACHE_LINE_SIZE, rte_socket_id());
7227 [ # # ]: 0 : if (!at) {
7228 : 0 : rte_flow_error_set(error, ENOMEM,
7229 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7230 : : NULL,
7231 : : "cannot allocate action template");
7232 : 0 : return NULL;
7233 : : }
7234 : : /* The actions are stored in the first part of the buffer. */
7235 : 0 : at->attr = *attr;
7236 : 0 : at->actions = (struct rte_flow_action *)(at + 1);
7237 : 0 : act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,
7238 : : len, ra, error);
7239 [ # # ]: 0 : if (act_len <= 0)
7240 : 0 : goto error;
7241 : : /* The masks are stored in the second part. */
7242 : 0 : at->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);
7243 : 0 : mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
7244 : 0 : len - act_len, rm, error);
7245 [ # # ]: 0 : if (mask_len <= 0)
7246 : 0 : goto error;
7247 : : /* DR action offsets are stored in the third part. */
7248 : 0 : at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
7249 : 0 : at->src_off = RTE_PTR_ADD(at->dr_off,
7250 : : RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
7251 : : memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
7252 : 0 : at->actions_num = act_num;
7253 [ # # ]: 0 : for (i = 0; i < at->actions_num; ++i)
7254 : 0 : at->dr_off[i] = UINT16_MAX;
7255 : 0 : at->reformat_off = UINT16_MAX;
7256 : 0 : at->mhdr_off = UINT16_MAX;
7257 : 0 : at->recom_off = UINT16_MAX;
7258 [ # # ]: 0 : for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
7259 : 0 : actions++, masks++, i++) {
7260 : : const struct rte_flow_action_modify_field *info;
7261 : :
7262 [ # # # ]: 0 : switch (actions->type) {
7263 : : /*
7264 : : * The mlx5 PMD stores the indirect action index directly in the action conf.
7265 : : * The rte_flow_conv() function copies the content behind the conf pointer,
7266 : : * so the indirect action index must be restored from the action conf here.
7267 : : */
7268 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
7269 : 0 : at->actions[i].conf = ra[i].conf;
7270 : 0 : at->masks[i].conf = rm[i].conf;
7271 : 0 : break;
7272 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7273 : 0 : info = actions->conf;
7274 [ # # # # ]: 0 : if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
7275 : 0 : flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
7276 : 0 : &at->flex_item)) ||
7277 [ # # # # ]: 0 : (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
7278 : 0 : flow_hw_flex_item_acquire(dev, info->src.flex_handle,
7279 : : &at->flex_item)))
7280 : 0 : goto error;
7281 : : break;
7282 : : default:
7283 : : break;
7284 : : }
7285 : : }
7286 : 0 : at->tmpl = flow_hw_dr_actions_template_create(dev, at);
7287 [ # # ]: 0 : if (!at->tmpl)
7288 : 0 : goto error;
7289 : 0 : at->action_flags = action_flags;
7290 : 0 : __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
7291 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
7292 : 0 : return at;
7293 : 0 : error:
7294 : : if (at) {
7295 [ # # ]: 0 : if (at->tmpl)
7296 : 0 : mlx5dr_action_template_destroy(at->tmpl);
7297 : 0 : mlx5_free(at);
7298 : : }
7299 : 0 : rte_flow_error_set(error, rte_errno,
7300 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7301 : : "Failed to create action template");
7302 : 0 : return NULL;
7303 : : }
7304 : :
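 : : /*
 : :  * Informative sketch (not part of the driver): applications reach the
 : :  * function above through the generic rte_flow_actions_template_create()
 : :  * API. Assuming port 0 was already set up with rte_flow_configure(),
 : :  * a minimal ingress actions template with a per-rule QUEUE action
 : :  * could be created as follows:
 : :  *
 : :  * @code{.c}
 : :  * const struct rte_flow_actions_template_attr attr = { .ingress = 1 };
 : :  * const struct rte_flow_action actions[] = {
 : :  *     { .type = RTE_FLOW_ACTION_TYPE_QUEUE }, // queue index set per rule
 : :  *     { .type = RTE_FLOW_ACTION_TYPE_END },
 : :  * };
 : :  * const struct rte_flow_action masks[] = {
 : :  *     { .type = RTE_FLOW_ACTION_TYPE_QUEUE }, // zeroed conf: not constant
 : :  *     { .type = RTE_FLOW_ACTION_TYPE_END },
 : :  * };
 : :  * struct rte_flow_error err;
 : :  * struct rte_flow_actions_template *at =
 : :  *     rte_flow_actions_template_create(0, &attr, actions, masks, &err);
 : :  * @endcode
 : :  */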
7305 : : /**
7306 : : * Destroy flow action template.
7307 : : *
7308 : : * @param[in] dev
7309 : : * Pointer to the rte_eth_dev structure.
7310 : : * @param[in] template
7311 : : * Pointer to the action template to be destroyed.
7312 : : * @param[out] error
7313 : : * Pointer to error structure.
7314 : : *
7315 : : * @return
7316 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
7317 : : */
7318 : : static int
7319 : 0 : flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
7320 : : struct rte_flow_actions_template *template,
7321 : : struct rte_flow_error *error __rte_unused)
7322 : : {
7323 : : uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
7324 : : MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
7325 : :
7326 [ # # ]: 0 : if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
7327 : 0 : DRV_LOG(WARNING, "Action template %p is still in use.",
7328 : : (void *)template);
7329 : 0 : return rte_flow_error_set(error, EBUSY,
7330 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7331 : : NULL,
7332 : : "action template in using");
7333 : : }
7334 [ # # ]: 0 : if (template->action_flags & flag)
7335 : 0 : mlx5_free_srh_flex_parser(dev);
7336 [ # # ]: 0 : LIST_REMOVE(template, next);
7337 : 0 : flow_hw_flex_item_release(dev, &template->flex_item);
7338 [ # # ]: 0 : if (template->tmpl)
7339 : 0 : mlx5dr_action_template_destroy(template->tmpl);
7340 : 0 : mlx5_free(template);
7341 : 0 : return 0;
7342 : : }
7343 : :
7344 : : static uint32_t
7345 : : flow_hw_count_items(const struct rte_flow_item *items)
7346 : : {
7347 : : const struct rte_flow_item *curr_item;
7348 : : uint32_t nb_items;
7349 : :
7350 : : nb_items = 0;
7351 [ # # ]: 0 : for (curr_item = items; curr_item->type != RTE_FLOW_ITEM_TYPE_END; ++curr_item)
7352 : 0 : ++nb_items;
7353 : 0 : return ++nb_items;
7354 : : }
7355 : :
7356 : : static struct rte_flow_item *
7357 : 0 : flow_hw_prepend_item(const struct rte_flow_item *items,
7358 : : const uint32_t nb_items,
7359 : : const struct rte_flow_item *new_item,
7360 : : struct rte_flow_error *error)
7361 : : {
7362 : : struct rte_flow_item *copied_items;
7363 : : size_t size;
7364 : :
7365 : : /* Allocate new array of items. */
7366 : 0 : size = sizeof(*copied_items) * (nb_items + 1);
7367 : 0 : copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, rte_socket_id());
7368 [ # # ]: 0 : if (!copied_items) {
7369 : 0 : rte_flow_error_set(error, ENOMEM,
7370 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7371 : : NULL,
7372 : : "cannot allocate item template");
7373 : 0 : return NULL;
7374 : : }
7375 : : /* Put new item at the beginning and copy the rest. */
7376 : 0 : copied_items[0] = *new_item;
7377 [ # # ]: 0 : rte_memcpy(&copied_items[1], items, sizeof(*items) * nb_items);
7378 : : return copied_items;
7379 : : }
7380 : :
7381 : : static int
7382 : 0 : flow_hw_item_compare_field_validate(enum rte_flow_field_id arg_field,
7383 : : enum rte_flow_field_id base_field,
7384 : : struct rte_flow_error *error)
7385 : : {
7386 [ # # # ]: 0 : switch (arg_field) {
7387 : : case RTE_FLOW_FIELD_TAG:
7388 : : case RTE_FLOW_FIELD_META:
7389 : : case RTE_FLOW_FIELD_ESP_SEQ_NUM:
7390 : : break;
7391 : 0 : case RTE_FLOW_FIELD_RANDOM:
7392 [ # # ]: 0 : if (base_field == RTE_FLOW_FIELD_VALUE)
7393 : : return 0;
7394 : 0 : return rte_flow_error_set(error, EINVAL,
7395 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7396 : : NULL,
7397 : : "compare random is supported only with immediate value");
7398 : 0 : default:
7399 : 0 : return rte_flow_error_set(error, ENOTSUP,
7400 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7401 : : NULL,
7402 : : "compare item argument field is not supported");
7403 : : }
7404 [ # # ]: 0 : switch (base_field) {
7405 : : case RTE_FLOW_FIELD_TAG:
7406 : : case RTE_FLOW_FIELD_META:
7407 : : case RTE_FLOW_FIELD_VALUE:
7408 : : case RTE_FLOW_FIELD_ESP_SEQ_NUM:
7409 : : break;
7410 : 0 : default:
7411 : 0 : return rte_flow_error_set(error, ENOTSUP,
7412 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7413 : : NULL,
7414 : : "compare item base field is not supported");
7415 : : }
7416 : : return 0;
7417 : : }
7418 : :
7419 : : static inline uint32_t
7420 : : flow_hw_item_compare_width_supported(enum rte_flow_field_id field)
7421 : : {
7422 [ # # # ]: 0 : switch (field) {
7423 : : case RTE_FLOW_FIELD_TAG:
7424 : : case RTE_FLOW_FIELD_META:
7425 : : case RTE_FLOW_FIELD_ESP_SEQ_NUM:
7426 : : return 32;
7427 : 0 : case RTE_FLOW_FIELD_RANDOM:
7428 : 0 : return 16;
7429 : : default:
7430 : : break;
7431 : : }
7432 : 0 : return 0;
7433 : : }
7434 : :
7435 : : static int
7436 : 0 : flow_hw_validate_item_compare(const struct rte_flow_item *item,
7437 : : struct rte_flow_error *error)
7438 : : {
7439 : 0 : const struct rte_flow_item_compare *comp_m = item->mask;
7440 : 0 : const struct rte_flow_item_compare *comp_v = item->spec;
7441 : : int ret;
7442 : :
7443 [ # # ]: 0 : if (unlikely(!comp_m))
7444 : 0 : return rte_flow_error_set(error, EINVAL,
7445 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7446 : : NULL,
7447 : : "compare item mask is missing");
7448 [ # # ]: 0 : if (comp_m->width != UINT32_MAX)
7449 : 0 : return rte_flow_error_set(error, EINVAL,
7450 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7451 : : NULL,
7452 : : "compare item only support full mask");
7453 : 0 : ret = flow_hw_item_compare_field_validate(comp_m->a.field,
7454 : 0 : comp_m->b.field, error);
7455 [ # # ]: 0 : if (ret < 0)
7456 : : return ret;
7457 [ # # ]: 0 : if (comp_v) {
7458 : : uint32_t width;
7459 : :
7460 [ # # ]: 0 : if (comp_v->operation != comp_m->operation ||
7461 [ # # ]: 0 : comp_v->a.field != comp_m->a.field ||
7462 [ # # ]: 0 : comp_v->b.field != comp_m->b.field)
7463 : 0 : return rte_flow_error_set(error, EINVAL,
7464 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7465 : : NULL,
7466 : : "compare item spec/mask not matching");
7467 : : width = flow_hw_item_compare_width_supported(comp_v->a.field);
7468 : : MLX5_ASSERT(width > 0);
7469 [ # # ]: 0 : if ((comp_v->width & comp_m->width) != width)
7470 : 0 : return rte_flow_error_set(error, EINVAL,
7471 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7472 : : NULL,
7473 : : "compare item only support full mask");
7474 : : }
7475 : : return 0;
7476 : : }
7477 : :
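 : : /*
 : :  * Informative sketch: a COMPARE item that passes the validation above -
 : :  * full mask width, a TAG argument compared against an immediate value
 : :  * (the immediate itself is omitted for brevity), and a spec width equal
 : :  * to the full 32-bit field width:
 : :  *
 : :  * @code{.c}
 : :  * struct rte_flow_item_compare cmp_spec = {
 : :  *     .operation = RTE_FLOW_ITEM_COMPARE_EQ,
 : :  *     .a = { .field = RTE_FLOW_FIELD_TAG, .tag_index = 0 },
 : :  *     .b = { .field = RTE_FLOW_FIELD_VALUE },
 : :  *     .width = 32,
 : :  * };
 : :  * struct rte_flow_item_compare cmp_mask = {
 : :  *     .operation = RTE_FLOW_ITEM_COMPARE_EQ,
 : :  *     .a = { .field = RTE_FLOW_FIELD_TAG },
 : :  *     .b = { .field = RTE_FLOW_FIELD_VALUE },
 : :  *     .width = UINT32_MAX,
 : :  * };
 : :  * const struct rte_flow_item item = {
 : :  *     .type = RTE_FLOW_ITEM_TYPE_COMPARE,
 : :  *     .spec = &cmp_spec,
 : :  *     .mask = &cmp_mask,
 : :  * };
 : :  * @endcode
 : :  */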
7478 : : static int
7479 : 0 : flow_hw_pattern_validate(struct rte_eth_dev *dev,
7480 : : const struct rte_flow_pattern_template_attr *attr,
7481 : : const struct rte_flow_item items[],
7482 : : struct rte_flow_error *error)
7483 : : {
7484 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7485 : : int i, tag_idx;
7486 : : bool items_end = false;
7487 : : uint32_t tag_bitmap = 0;
7488 : : int ret;
7489 : :
7490 [ # # ]: 0 : if (!attr->ingress && !attr->egress && !attr->transfer)
7491 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
7492 : : "at least one of the direction attributes"
7493 : : " must be specified");
7494 [ # # ]: 0 : if (priv->sh->config.dv_esw_en) {
7495 : : MLX5_ASSERT(priv->master || priv->representor);
7496 [ # # ]: 0 : if (priv->master) {
7497 [ # # ]: 0 : if ((attr->ingress && attr->egress) ||
7498 [ # # ]: 0 : (attr->ingress && attr->transfer) ||
7499 [ # # ]: 0 : (attr->egress && attr->transfer))
7500 : 0 : return rte_flow_error_set(error, EINVAL,
7501 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
7502 : : "only one direction attribute at once"
7503 : : " can be used on transfer proxy port");
7504 : : } else {
7505 [ # # ]: 0 : if (attr->transfer)
7506 : 0 : return rte_flow_error_set(error, EINVAL,
7507 : : RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
7508 : : "transfer attribute cannot be used with"
7509 : : " port representors");
7510 [ # # ]: 0 : if (attr->ingress && attr->egress)
7511 : 0 : return rte_flow_error_set(error, EINVAL,
7512 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
7513 : : "ingress and egress direction attributes"
7514 : : " cannot be used at the same time on"
7515 : : " port representors");
7516 : : }
7517 : : } else {
7518 [ # # ]: 0 : if (attr->transfer)
7519 : 0 : return rte_flow_error_set(error, EINVAL,
7520 : : RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
7521 : : "transfer attribute cannot be used when"
7522 : : " E-Switch is disabled");
7523 : : }
7524 [ # # ]: 0 : for (i = 0; !items_end; i++) {
7525 : 0 : int type = items[i].type;
7526 : :
7527 [ # # # # : 0 : switch (type) {
# # # # #
# ]
7528 : 0 : case RTE_FLOW_ITEM_TYPE_TAG:
7529 : : {
7530 : 0 : const struct rte_flow_item_tag *tag =
7531 : : (const struct rte_flow_item_tag *)items[i].spec;
7532 : :
7533 [ # # ]: 0 : if (tag == NULL)
7534 : 0 : return rte_flow_error_set(error, EINVAL,
7535 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7536 : : NULL,
7537 : : "Tag spec is NULL");
7538 [ # # ]: 0 : if (tag->index >= MLX5_FLOW_HW_TAGS_MAX &&
7539 : : tag->index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
7540 : 0 : return rte_flow_error_set(error, EINVAL,
7541 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7542 : : NULL,
7543 : : "Invalid tag index");
7544 [ # # ]: 0 : tag_idx = flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, tag->index);
7545 [ # # ]: 0 : if (tag_idx == REG_NON)
7546 : 0 : return rte_flow_error_set(error, EINVAL,
7547 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7548 : : NULL,
7549 : : "Unsupported tag index");
7550 [ # # ]: 0 : if (tag_bitmap & (1 << tag_idx))
7551 : 0 : return rte_flow_error_set(error, EINVAL,
7552 : : RTE_FLOW_ERROR_TYPE_ITEM,
7553 : : NULL,
7554 : : "Duplicated tag index");
7555 : 0 : tag_bitmap |= 1 << tag_idx;
7556 : 0 : break;
7557 : : }
7558 : 0 : case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7559 : : {
7560 : 0 : const struct rte_flow_item_tag *tag =
7561 : : (const struct rte_flow_item_tag *)items[i].spec;
7562 : 0 : uint16_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
7563 : :
7564 [ # # ]: 0 : if (!((1 << (tag->index - REG_C_0)) & regcs))
7565 : 0 : return rte_flow_error_set(error, EINVAL,
7566 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7567 : : NULL,
7568 : : "Unsupported internal tag index");
7569 [ # # ]: 0 : if (tag_bitmap & (1 << tag->index))
7570 : 0 : return rte_flow_error_set(error, EINVAL,
7571 : : RTE_FLOW_ERROR_TYPE_ITEM,
7572 : : NULL,
7573 : : "Duplicated tag index");
7574 : 0 : tag_bitmap |= 1 << tag->index;
7575 : 0 : break;
7576 : : }
7577 : 0 : case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
7578 [ # # # # ]: 0 : if (attr->ingress && priv->sh->config.repr_matching)
7579 : 0 : return rte_flow_error_set(error, EINVAL,
7580 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL,
7581 : : "represented port item cannot be used"
7582 : : " when ingress attribute is set");
7583 [ # # ]: 0 : if (attr->egress)
7584 : 0 : return rte_flow_error_set(error, EINVAL,
7585 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL,
7586 : : "represented port item cannot be used"
7587 : : " when egress attribute is set");
7588 : : break;
7589 : : case RTE_FLOW_ITEM_TYPE_META:
7590 : : /* ingress + group 0 is not supported */
7591 : : break;
7592 : : case RTE_FLOW_ITEM_TYPE_METER_COLOR:
7593 : : {
7594 : : int reg = flow_hw_get_reg_id(dev,
7595 : : RTE_FLOW_ITEM_TYPE_METER_COLOR,
7596 : : 0);
7597 [ # # ]: 0 : if (reg == REG_NON)
7598 : 0 : return rte_flow_error_set(error, EINVAL,
7599 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7600 : : NULL,
7601 : : "Unsupported meter color register");
7602 : : break;
7603 : : }
7604 : 0 : case RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY:
7605 : : {
7606 [ # # ]: 0 : if (!priv->sh->lag_rx_port_affinity_en)
7607 : 0 : return rte_flow_error_set(error, EINVAL,
7608 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL,
7609 : : "Unsupported aggregated affinity with Older FW");
7610 [ # # # # : 0 : if ((attr->transfer && priv->fdb_def_rule) || attr->egress)
# # ]
7611 : 0 : return rte_flow_error_set(error, EINVAL,
7612 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL,
7613 : : "Aggregated affinity item not supported"
7614 : : " with egress or transfer"
7615 : : " attribute");
7616 : : break;
7617 : : }
7618 : 0 : case RTE_FLOW_ITEM_TYPE_COMPARE:
7619 : : {
7620 : 0 : ret = flow_hw_validate_item_compare(&items[i], error);
7621 [ # # ]: 0 : if (ret)
7622 : 0 : return ret;
7623 : : break;
7624 : : }
7625 : 0 : case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
7626 : : {
7627 : : int ret;
7628 : :
7629 : 0 : ret = mlx5_flow_geneve_tlv_option_validate(priv,
7630 : : &items[i],
7631 : : error);
7632 [ # # ]: 0 : if (ret < 0)
7633 : 0 : return ret;
7634 : : break;
7635 : : }
7636 : : case RTE_FLOW_ITEM_TYPE_VOID:
7637 : : case RTE_FLOW_ITEM_TYPE_ETH:
7638 : : case RTE_FLOW_ITEM_TYPE_VLAN:
7639 : : case RTE_FLOW_ITEM_TYPE_IPV4:
7640 : : case RTE_FLOW_ITEM_TYPE_IPV6:
7641 : : case RTE_FLOW_ITEM_TYPE_UDP:
7642 : : case RTE_FLOW_ITEM_TYPE_TCP:
7643 : : case RTE_FLOW_ITEM_TYPE_GTP:
7644 : : case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7645 : : case RTE_FLOW_ITEM_TYPE_VXLAN:
7646 : : case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7647 : : case RTE_FLOW_ITEM_TYPE_MPLS:
7648 : : case RTE_FLOW_ITEM_TYPE_GENEVE:
7649 : : case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
7650 : : case RTE_FLOW_ITEM_TYPE_GRE:
7651 : : case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7652 : : case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
7653 : : case RTE_FLOW_ITEM_TYPE_ICMP:
7654 : : case RTE_FLOW_ITEM_TYPE_ICMP6:
7655 : : case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
7656 : : case RTE_FLOW_ITEM_TYPE_QUOTA:
7657 : : case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
7658 : : case RTE_FLOW_ITEM_TYPE_CONNTRACK:
7659 : : case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
7660 : : case RTE_FLOW_ITEM_TYPE_ESP:
7661 : : case RTE_FLOW_ITEM_TYPE_FLEX:
7662 : : case RTE_FLOW_ITEM_TYPE_IB_BTH:
7663 : : case RTE_FLOW_ITEM_TYPE_PTYPE:
7664 : : case RTE_FLOW_ITEM_TYPE_RANDOM:
7665 : : break;
7666 : : case RTE_FLOW_ITEM_TYPE_INTEGRITY:
7667 : : /*
7668 : : * Integrity flow item validation requires access to
7669 : : * both item mask and spec.
7670 : : * Current HWS model allows item mask in pattern
7671 : : * template and item spec in flow rule.
7672 : : */
7673 : : break;
7674 : 0 : case RTE_FLOW_ITEM_TYPE_END:
7675 : : items_end = true;
7676 : 0 : break;
7677 : 0 : default:
7678 : 0 : return rte_flow_error_set(error, EINVAL,
7679 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7680 : : NULL,
7681 : : "Unsupported item type");
7682 : : }
7683 : : }
7684 : : return 0;
7685 : : }
7686 : :
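 : : /*
 : :  * Informative sketch: on a transfer proxy port the checks above require
 : :  * exactly one direction attribute, e.g.:
 : :  *
 : :  * @code{.c}
 : :  * const struct rte_flow_pattern_template_attr attr = {
 : :  *     .relaxed_matching = 0,
 : :  *     .transfer = 1, // setting .ingress = 1 as well would be rejected
 : :  * };
 : :  * @endcode
 : :  */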
7687 : : static bool
7688 : : flow_hw_pattern_has_sq_match(const struct rte_flow_item *items)
7689 : : {
7690 : : unsigned int i;
7691 : :
7692 [ # # ]: 0 : for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i)
7693 [ # # ]: 0 : if (items[i].type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ)
7694 : : return true;
7695 : : return false;
7696 : : }
7697 : :
7698 : : /*
7699 : : * Verify that the tested flow patterns fit the STE size limit in a HWS group.
7700 : : *
7702 : : * Return values:
7703 : : * 0 : Tested patterns fit STE size limit
7704 : : * -EINVAL : Invalid parameters detected
7705 : : * -E2BIG : Tested patterns exceed STE size limit
7706 : : */
7707 : : static int
7708 : 0 : pattern_template_validate(struct rte_eth_dev *dev,
7709 : : struct rte_flow_pattern_template *pt[], uint32_t pt_num)
7710 : : {
7711 : : struct rte_flow_error error;
7712 : 0 : struct mlx5_flow_template_table_cfg tbl_cfg = {
7713 : : .attr = {
7714 : : .nb_flows = 64,
7715 : : .insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN,
7716 : : .hash_func = RTE_FLOW_TABLE_HASH_FUNC_DEFAULT,
7717 : : .flow_attr = {
7718 : : .group = 1,
7719 : 0 : .ingress = pt[0]->attr.ingress,
7720 : 0 : .egress = pt[0]->attr.egress,
7721 : 0 : .transfer = pt[0]->attr.transfer
7722 : : }
7723 : : }
7724 : : };
7725 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7726 : : struct rte_flow_actions_template *action_template;
7727 : : struct rte_flow_template_table *tmpl_tbl;
7728 : : int ret;
7729 : :
7730 [ # # ]: 0 : if (pt[0]->attr.ingress)
7731 : 0 : action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
7732 [ # # ]: 0 : else if (pt[0]->attr.egress)
7733 : 0 : action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
7734 [ # # ]: 0 : else if (pt[0]->attr.transfer)
7735 : 0 : action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
7736 : : else
7737 : : return -EINVAL;
7738 [ # # ]: 0 : if (pt[0]->item_flags & MLX5_FLOW_ITEM_COMPARE)
7739 : 0 : tbl_cfg.attr.nb_flows = 1;
7740 : 0 : tmpl_tbl = flow_hw_table_create(dev, &tbl_cfg, pt, pt_num,
7741 : : &action_template, 1, NULL);
7742 [ # # ]: 0 : if (tmpl_tbl) {
7743 : : ret = 0;
7744 : 0 : flow_hw_table_destroy(dev, tmpl_tbl, &error);
7745 : : } else {
7746 [ # # ]: 0 : ret = rte_errno == E2BIG ? -E2BIG : 0;
7747 : : }
7748 : : return ret;
7749 : : }
7750 : :
7751 : : /**
7752 : : * Create flow item template.
7753 : : *
7754 : : * @param[in] dev
7755 : : * Pointer to the rte_eth_dev structure.
7756 : : * @param[in] attr
7757 : : * Pointer to the item template attributes.
7758 : : * @param[in] items
7759 : : * The template item pattern.
7760 : : * @param[out] error
7761 : : * Pointer to error structure.
7762 : : *
7763 : : * @return
7764 : : * Item template pointer on success, NULL otherwise and rte_errno is set.
7765 : : */
7766 : : static struct rte_flow_pattern_template *
7767 : 0 : flow_hw_pattern_template_create(struct rte_eth_dev *dev,
7768 : : const struct rte_flow_pattern_template_attr *attr,
7769 : : const struct rte_flow_item items[],
7770 : : struct rte_flow_error *error)
7771 : : {
7772 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7773 : : struct rte_flow_pattern_template *it;
7774 : : struct rte_flow_item *copied_items = NULL;
7775 : : const struct rte_flow_item *tmpl_items;
7776 : : uint32_t orig_item_nb;
7777 : 0 : struct rte_flow_item port = {
7778 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
7779 : : .mask = &rte_flow_item_ethdev_mask,
7780 : : };
7781 : 0 : struct rte_flow_item_tag tag_v = {
7782 : : .data = 0,
7783 : : .index = REG_C_0,
7784 : : };
7785 : 0 : struct rte_flow_item_tag tag_m = {
7786 : : .data = flow_hw_tx_tag_regc_mask(dev),
7787 : : .index = 0xff,
7788 : : };
7789 : 0 : struct rte_flow_item tag = {
7790 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
7791 : : .spec = &tag_v,
7792 : : .mask = &tag_m,
7793 : : .last = NULL
7794 : : };
7795 : : unsigned int i = 0;
7796 : :
7797 [ # # ]: 0 : if (flow_hw_pattern_validate(dev, attr, items, error))
7798 : : return NULL;
7799 : : orig_item_nb = flow_hw_count_items(items);
7800 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en &&
7801 : : priv->sh->config.repr_matching &&
7802 [ # # ]: 0 : attr->ingress && !attr->egress && !attr->transfer) {
7803 : 0 : copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
7804 [ # # ]: 0 : if (!copied_items)
7805 : : return NULL;
7806 : : tmpl_items = copied_items;
7807 [ # # # # ]: 0 : } else if (priv->sh->config.dv_esw_en &&
7808 : : priv->sh->config.repr_matching &&
7809 [ # # ]: 0 : !attr->ingress && attr->egress && !attr->transfer) {
7810 [ # # ]: 0 : if (flow_hw_pattern_has_sq_match(items)) {
7811 : 0 : DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
7812 : : "pattern template", dev->data->port_id);
7813 : : tmpl_items = items;
7814 : 0 : goto setup_pattern_template;
7815 : : }
7816 : 0 : copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
7817 [ # # ]: 0 : if (!copied_items)
7818 : : return NULL;
7819 : : tmpl_items = copied_items;
7820 : : } else {
7821 : : tmpl_items = items;
7822 : : }
7823 : 0 : setup_pattern_template:
7824 : 0 : it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
7825 [ # # ]: 0 : if (!it) {
7826 [ # # ]: 0 : if (copied_items)
7827 : 0 : mlx5_free(copied_items);
7828 : 0 : rte_flow_error_set(error, ENOMEM,
7829 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7830 : : NULL,
7831 : : "cannot allocate item template");
7832 : 0 : return NULL;
7833 : : }
7834 : 0 : it->attr = *attr;
7835 : 0 : it->orig_item_nb = orig_item_nb;
7836 : 0 : it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
7837 [ # # ]: 0 : if (!it->mt) {
7838 [ # # ]: 0 : if (copied_items)
7839 : 0 : mlx5_free(copied_items);
7840 : 0 : mlx5_free(it);
7841 : 0 : rte_flow_error_set(error, rte_errno,
7842 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7843 : : NULL,
7844 : : "cannot create match template");
7845 : 0 : return NULL;
7846 : : }
7847 : 0 : it->item_flags = flow_hw_matching_item_flags_get(tmpl_items);
7848 [ # # ]: 0 : if (copied_items) {
7849 [ # # ]: 0 : if (attr->ingress)
7850 : 0 : it->implicit_port = true;
7851 [ # # ]: 0 : else if (attr->egress)
7852 : 0 : it->implicit_tag = true;
7853 : 0 : mlx5_free(copied_items);
7854 : : }
7855 : : /* Either inner or outer, can't both. */
7856 [ # # ]: 0 : if (it->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
7857 : : MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
7858 [ # # ]: 0 : if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
7859 [ # # ]: 0 : (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) ||
7860 : 0 : (mlx5_alloc_srh_flex_parser(dev))) {
7861 : 0 : claim_zero(mlx5dr_match_template_destroy(it->mt));
7862 : 0 : mlx5_free(it);
7863 : 0 : rte_flow_error_set(error, rte_errno,
7864 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7865 : : "cannot create IPv6 routing extension support");
7866 : 0 : return NULL;
7867 : : }
7868 : : }
7869 [ # # ]: 0 : for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
7870 [ # # # ]: 0 : switch (items[i].type) {
7871 : 0 : case RTE_FLOW_ITEM_TYPE_FLEX: {
7872 : 0 : const struct rte_flow_item_flex *spec =
7873 : : (const struct rte_flow_item_flex *)items[i].spec;
7874 : 0 : struct rte_flow_item_flex_handle *handle = spec->handle;
7875 : :
7876 [ # # ]: 0 : if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
7877 : 0 : rte_flow_error_set(error, rte_errno,
7878 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7879 : : "Failed to acquire flex item");
7880 : 0 : goto error;
7881 : : }
7882 : : break;
7883 : : }
7884 : 0 : case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: {
7885 : 0 : const struct rte_flow_item_geneve_opt *spec = items[i].spec;
7886 : :
7887 [ # # ]: 0 : if (mlx5_geneve_tlv_option_register(priv, spec,
7888 : 0 : &it->geneve_opt_mng)) {
7889 : 0 : rte_flow_error_set(error, rte_errno,
7890 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7891 : : "Failed to register GENEVE TLV option");
7892 : 0 : goto error;
7893 : : }
7894 : : break;
7895 : : }
7896 : : default:
7897 : : break;
7898 : : }
7899 : : }
7900 : 0 : __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
7901 : 0 : rte_errno = pattern_template_validate(dev, &it, 1);
7902 [ # # ]: 0 : if (rte_errno)
7903 : 0 : goto error;
7904 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
7905 : 0 : return it;
7906 : 0 : error:
7907 : 0 : flow_hw_flex_item_release(dev, &it->flex_item);
7908 : 0 : mlx5_geneve_tlv_options_unregister(priv, &it->geneve_opt_mng);
7909 : 0 : claim_zero(mlx5dr_match_template_destroy(it->mt));
7910 : 0 : mlx5_free(it);
7911 : 0 : rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7912 : : "Failed to create pattern template");
7913 : 0 : return NULL;
7914 : : }
7915 : :
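 : : /*
 : :  * Informative sketch: the application-facing entry point for the
 : :  * function above is rte_flow_pattern_template_create(). A minimal
 : :  * ingress template matching IPv4/TCP packets could look like:
 : :  *
 : :  * @code{.c}
 : :  * const struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
 : :  * const struct rte_flow_item pattern[] = {
 : :  *     { .type = RTE_FLOW_ITEM_TYPE_ETH },
 : :  *     { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 : :  *     { .type = RTE_FLOW_ITEM_TYPE_TCP },
 : :  *     { .type = RTE_FLOW_ITEM_TYPE_END },
 : :  * };
 : :  * struct rte_flow_error err;
 : :  * struct rte_flow_pattern_template *pt =
 : :  *     rte_flow_pattern_template_create(0, &attr, pattern, &err);
 : :  * @endcode
 : :  */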
7916 : : /**
7917 : : * Destroy flow item template.
7918 : : *
7919 : : * @param[in] dev
7920 : : * Pointer to the rte_eth_dev structure.
7921 : : * @param[in] template
7922 : : * Pointer to the item template to be destroyed.
7923 : : * @param[out] error
7924 : : * Pointer to error structure.
7925 : : *
7926 : : * @return
7927 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
7928 : : */
7929 : : static int
7930 : 0 : flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
7931 : : struct rte_flow_pattern_template *template,
7932 : : struct rte_flow_error *error __rte_unused)
7933 : : {
7934 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7935 : :
7936 [ # # ]: 0 : if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
7937 : 0 : DRV_LOG(WARNING, "Item template %p is still in use.",
7938 : : (void *)template);
7939 : 0 : return rte_flow_error_set(error, EBUSY,
7940 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7941 : : NULL,
7942 : : "item template in using");
7943 : : }
7944 [ # # ]: 0 : if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
7945 : : MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
7946 : 0 : mlx5_free_srh_flex_parser(dev);
7947 [ # # ]: 0 : LIST_REMOVE(template, next);
7948 : 0 : flow_hw_flex_item_release(dev, &template->flex_item);
7949 : 0 : mlx5_geneve_tlv_options_unregister(priv, &template->geneve_opt_mng);
7950 : 0 : claim_zero(mlx5dr_match_template_destroy(template->mt));
7951 : 0 : mlx5_free(template);
7952 : 0 : return 0;
7953 : : }
7954 : :
7955 : : /*
7956 : : * Get information about HWS pre-configurable resources.
7957 : : *
7958 : : * @param[in] dev
7959 : : * Pointer to the rte_eth_dev structure.
7960 : : * @param[out] port_info
7961 : : * Pointer to port information.
7962 : : * @param[out] queue_info
7963 : : * Pointer to queue information.
7964 : : * @param[out] error
7965 : : * Pointer to error structure.
7966 : : *
7967 : : * @return
7968 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
7969 : : */
7970 : : static int
7971 : 0 : flow_hw_info_get(struct rte_eth_dev *dev,
7972 : : struct rte_flow_port_info *port_info,
7973 : : struct rte_flow_queue_info *queue_info,
7974 : : struct rte_flow_error *error __rte_unused)
7975 : : {
7976 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7977 : 0 : uint16_t port_id = dev->data->port_id;
7978 : : struct rte_mtr_capabilities mtr_cap;
7979 : : int ret;
7980 : :
7981 : : memset(port_info, 0, sizeof(*port_info));
7982 : : /* Queue size is unlimited at the low level. */
7983 : 0 : port_info->max_nb_queues = UINT32_MAX;
7984 : 0 : queue_info->max_size = UINT32_MAX;
7985 : :
7986 : : memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
7987 : 0 : ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
7988 [ # # ]: 0 : if (!ret)
7989 : 0 : port_info->max_nb_meters = mtr_cap.n_max;
7990 : 0 : port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
7991 : 0 : port_info->max_nb_aging_objects = port_info->max_nb_counters;
7992 : 0 : return 0;
7993 : : }
7994 : :
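 : : /*
 : :  * Informative sketch: applications query these limits through
 : :  * rte_flow_info_get() before sizing the rte_flow_configure() call:
 : :  *
 : :  * @code{.c}
 : :  * struct rte_flow_port_info port_info;
 : :  * struct rte_flow_queue_info queue_info;
 : :  * struct rte_flow_error err;
 : :  *
 : :  * if (rte_flow_info_get(0, &port_info, &queue_info, &err) == 0)
 : :  *     printf("max counters: %u\n", port_info.max_nb_counters);
 : :  * @endcode
 : :  */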
7995 : : /**
7996 : : * Create group callback.
7997 : : *
7998 : : * @param[in] tool_ctx
7999 : : * Pointer to the hash list related context.
8000 : : * @param[in] cb_ctx
8001 : : * Pointer to the group creation context.
8002 : : *
8003 : : * @return
8004 : : * Group entry on success, NULL otherwise and rte_errno is set.
8005 : : */
8006 : : struct mlx5_list_entry *
8007 : 0 : flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
8008 : : {
8009 : : struct mlx5_dev_ctx_shared *sh = tool_ctx;
8010 : : struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8011 : 0 : struct rte_eth_dev *dev = ctx->dev;
8012 : 0 : struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
8013 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
8014 : 0 : struct mlx5dr_table_attr dr_tbl_attr = {0};
8015 : 0 : struct rte_flow_error *error = ctx->error;
8016 : : struct mlx5_flow_group *grp_data;
8017 : : struct mlx5dr_table *tbl = NULL;
8018 : : struct mlx5dr_action *jump;
8019 : 0 : uint32_t idx = 0;
8020 : :
8021 : 0 : grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
8022 [ # # ]: 0 : if (!grp_data) {
8023 : 0 : rte_flow_error_set(error, ENOMEM,
8024 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8025 : : NULL,
8026 : : "cannot allocate flow table data entry");
8027 : 0 : return NULL;
8028 : : }
8029 : 0 : dr_tbl_attr.level = attr->group;
8030 [ # # ]: 0 : if (attr->transfer)
8031 : 0 : dr_tbl_attr.type = MLX5DR_TABLE_TYPE_FDB;
8032 [ # # ]: 0 : else if (attr->egress)
8033 : 0 : dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_TX;
8034 : : else
8035 : 0 : dr_tbl_attr.type = MLX5DR_TABLE_TYPE_NIC_RX;
8036 : 0 : tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
8037 [ # # ]: 0 : if (!tbl)
8038 : 0 : goto error;
8039 : 0 : grp_data->tbl = tbl;
8040 [ # # ]: 0 : if (attr->group) {
8041 : : /* Jump action to be used by non-root tables. */
8042 : 0 : jump = mlx5dr_action_create_dest_table
8043 : : (priv->dr_ctx, tbl,
8044 : 0 : mlx5_hw_act_flag[!!attr->group][dr_tbl_attr.type]);
8045 [ # # ]: 0 : if (!jump)
8046 : 0 : goto error;
8047 : 0 : grp_data->jump.hws_action = jump;
8048 : : /* Jump action to be used by the root table. */
8049 : 0 : jump = mlx5dr_action_create_dest_table
8050 : : (priv->dr_ctx, tbl,
8051 : : mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
8052 : 0 : [dr_tbl_attr.type]);
8053 [ # # ]: 0 : if (!jump)
8054 : 0 : goto error;
8055 : 0 : grp_data->jump.root_action = jump;
8056 : : }
8057 : 0 : grp_data->dev = dev;
8058 : 0 : grp_data->idx = idx;
8059 : 0 : grp_data->group_id = attr->group;
8060 : 0 : grp_data->type = dr_tbl_attr.type;
8061 : 0 : return &grp_data->entry;
8062 : 0 : error:
8063 [ # # ]: 0 : if (grp_data->jump.root_action)
8064 : 0 : mlx5dr_action_destroy(grp_data->jump.root_action);
8065 [ # # ]: 0 : if (grp_data->jump.hws_action)
8066 : 0 : mlx5dr_action_destroy(grp_data->jump.hws_action);
8067 [ # # ]: 0 : if (tbl)
8068 : 0 : mlx5dr_table_destroy(tbl);
8069 [ # # ]: 0 : if (idx)
8070 : 0 : mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
8071 : 0 : rte_flow_error_set(error, ENOMEM,
8072 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8073 : : NULL,
8074 : : "cannot allocate flow dr table");
8075 : 0 : return NULL;
8076 : : }
8077 : :
8078 : : /**
8079 : : * Remove group callback.
8080 : : *
8081 : : * @param[in] tool_ctx
8082 : : * Pointer to the hash list related context.
8083 : : * @param[in] entry
8084 : : * Pointer to the entry to be removed.
8085 : : */
8086 : : void
8087 : 0 : flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
8088 : : {
8089 : : struct mlx5_dev_ctx_shared *sh = tool_ctx;
8090 : : struct mlx5_flow_group *grp_data =
8091 : : container_of(entry, struct mlx5_flow_group, entry);
8092 : :
8093 : : MLX5_ASSERT(entry && sh);
8094 : : /* The wrapper glue functions should be used here instead. */
8095 [ # # ]: 0 : if (grp_data->jump.hws_action)
8096 : 0 : mlx5dr_action_destroy(grp_data->jump.hws_action);
8097 [ # # ]: 0 : if (grp_data->jump.root_action)
8098 : 0 : mlx5dr_action_destroy(grp_data->jump.root_action);
8099 : 0 : mlx5dr_table_destroy(grp_data->tbl);
8100 : 0 : mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
8101 : 0 : }
8102 : :
8103 : : /**
8104 : : * Match group callback.
8105 : : *
8106 : : * @param[in] tool_ctx
8107 : : * Pointer to the hash list related context.
8108 : : * @param[in] entry
8109 : : * Pointer to the group to be matched.
8110 : : * @param[in] cb_ctx
8111 : : * Pointer to the group matching context.
8112 : : *
8113 : : * @return
8114 : : * 0 on match, non-zero on mismatch.
8115 : : */
8116 : : int
8117 : 0 : flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
8118 : : void *cb_ctx)
8119 : : {
8120 : : struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8121 : : struct mlx5_flow_group *grp_data =
8122 : : container_of(entry, struct mlx5_flow_group, entry);
8123 : 0 : struct rte_flow_attr *attr =
8124 : : (struct rte_flow_attr *)ctx->data;
8125 : :
8126 : 0 : return (grp_data->dev != ctx->dev) ||
8127 [ # # ]: 0 : (grp_data->group_id != attr->group) ||
8128 [ # # # # ]: 0 : ((grp_data->type != MLX5DR_TABLE_TYPE_FDB) &&
8129 [ # # ]: 0 : attr->transfer) ||
8130 [ # # ]: 0 : ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
8131 [ # # # # ]: 0 : attr->egress) ||
8132 [ # # ]: 0 : ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
8133 : : attr->ingress);
8134 : : }
8135 : :
8136 : : /**
8137 : : * Clone group entry callback.
8138 : : *
8139 : : * @param[in] tool_ctx
8140 : : * Pointer to the hash list related context.
8141 : : * @param[in] entry
8142 : : * Pointer to the group to be matched.
8143 : : * @param[in] cb_ctx
8144 : : * Pointer to the group matching context.
8145 : : *
8146 : : * @return
8147 : : * 0 on matched, 1 on miss matched.
8148 : : */
8149 : : struct mlx5_list_entry *
8150 : 0 : flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
8151 : : void *cb_ctx)
8152 : : {
8153 : : struct mlx5_dev_ctx_shared *sh = tool_ctx;
8154 : : struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8155 : : struct mlx5_flow_group *grp_data;
8156 : 0 : struct rte_flow_error *error = ctx->error;
8157 : 0 : uint32_t idx = 0;
8158 : :
8159 : 0 : grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
8160 [ # # ]: 0 : if (!grp_data) {
8161 : 0 : rte_flow_error_set(error, ENOMEM,
8162 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8163 : : NULL,
8164 : : "cannot allocate flow table data entry");
8165 : 0 : return NULL;
8166 : : }
8167 : : memcpy(grp_data, oentry, sizeof(*grp_data));
8168 : 0 : grp_data->idx = idx;
8169 : 0 : return &grp_data->entry;
8170 : : }
8171 : :
8172 : : /**
8173 : : * Free cloned group entry callback.
8174 : : *
8175 : : * @param[in] tool_ctx
8176 : : * Pointer to the hash list related context.
8177 : : * @param[in] entry
8178 : : * Pointer to the group to be freed.
8179 : : */
8180 : : void
8181 : 0 : flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
8182 : : {
8183 : : struct mlx5_dev_ctx_shared *sh = tool_ctx;
8184 : : struct mlx5_flow_group *grp_data =
8185 : : container_of(entry, struct mlx5_flow_group, entry);
8186 : :
8187 : 0 : mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
8188 : 0 : }
8189 : :
8190 : : /**
8191 : : * Create and cache a vport action for given @p dev port. vport actions
8192 : : * cache is used in HWS with FDB flows.
8193 : : *
8194 : : * This function does not create any action if the proxy port for @p dev port
8195 : : * was not configured for HW Steering.
8196 : : *
8197 : : * This function assumes that E-Switch is enabled and PMD is running with
8198 : : * HW Steering configured.
8199 : : *
8200 : : * @param dev
8201 : : * Pointer to Ethernet device which will be the action destination.
8202 : : *
8203 : : * @return
8204 : : * 0 on success, a negative errno value otherwise.
8205 : : */
8206 : : int
8207 : 0 : flow_hw_create_vport_action(struct rte_eth_dev *dev)
8208 : : {
8209 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
8210 : : struct rte_eth_dev *proxy_dev;
8211 : : struct mlx5_priv *proxy_priv;
8212 : 0 : uint16_t port_id = dev->data->port_id;
8213 : 0 : uint16_t proxy_port_id = port_id;
8214 : : int ret;
8215 : :
8216 : 0 : ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
8217 [ # # ]: 0 : if (ret)
8218 : : return ret;
8219 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
8220 : 0 : proxy_priv = proxy_dev->data->dev_private;
8221 [ # # ]: 0 : if (!proxy_priv->hw_vport)
8222 : : return 0;
8223 [ # # ]: 0 : if (proxy_priv->hw_vport[port_id]) {
8224 : 0 : DRV_LOG(ERR, "port %u HWS vport action already created",
8225 : : port_id);
8226 : 0 : return -EINVAL;
8227 : : }
8228 : 0 : proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
8229 : : (proxy_priv->dr_ctx, priv->dev_port,
8230 : : MLX5DR_ACTION_FLAG_HWS_FDB);
8231 [ # # ]: 0 : if (!proxy_priv->hw_vport[port_id]) {
8232 : 0 : DRV_LOG(ERR, "port %u unable to create HWS vport action",
8233 : : port_id);
8234 : 0 : return -EINVAL;
8235 : : }
8236 : : return 0;
8237 : : }
8238 : :
8239 : : /**
8240 : : * Destroys the vport action associated with @p dev device
8241 : : * from actions' cache.
8242 : : *
8243 : : * This function does not destroy any action if there is no action cached
8244 : : * for @p dev or proxy port was not configured for HW Steering.
8245 : : *
8246 : : * This function assumes that E-Switch is enabled and PMD is running with
8247 : : * HW Steering configured.
8248 : : *
8249 : : * @param dev
8250 : : * Pointer to Ethernet device which will be the action destination.
8251 : : */
8252 : : void
8253 : 0 : flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
8254 : : {
8255 : : struct rte_eth_dev *proxy_dev;
8256 : : struct mlx5_priv *proxy_priv;
8257 : 0 : uint16_t port_id = dev->data->port_id;
8258 : 0 : uint16_t proxy_port_id = port_id;
8259 : :
8260 [ # # ]: 0 : if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
8261 : 0 : return;
8262 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
8263 : 0 : proxy_priv = proxy_dev->data->dev_private;
8264 [ # # # # ]: 0 : if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
8265 : : return;
8266 : 0 : mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
8267 : 0 : proxy_priv->hw_vport[port_id] = NULL;
8268 : : }
8269 : :
8270 : : static int
8271 : 0 : flow_hw_create_vport_actions(struct mlx5_priv *priv)
8272 : : {
8273 : : uint16_t port_id;
8274 : :
8275 : : MLX5_ASSERT(!priv->hw_vport);
8276 : 0 : priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
8277 : : sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
8278 : : 0, SOCKET_ID_ANY);
8279 [ # # ]: 0 : if (!priv->hw_vport)
8280 : : return -ENOMEM;
8281 : 0 : DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
8282 : 0 : DRV_LOG(DEBUG, "port %u :: domain_id=%u", priv->dev_data->port_id, priv->domain_id);
8283 [ # # ]: 0 : MLX5_ETH_FOREACH_DEV(port_id, NULL) {
8284 : 0 : struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;
8285 : :
8286 [ # # ]: 0 : if (!port_priv ||
8287 [ # # ]: 0 : port_priv->domain_id != priv->domain_id)
8288 : 0 : continue;
8289 : 0 : DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
8290 : : priv->dev_data->port_id, port_id, port_priv->dev_port);
8291 : 0 : priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
8292 : : (priv->dr_ctx, port_priv->dev_port,
8293 : : MLX5DR_ACTION_FLAG_HWS_FDB);
8294 : 0 : DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
8295 : : priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
8296 [ # # ]: 0 : if (!priv->hw_vport[port_id])
8297 : : return -EINVAL;
8298 : : }
8299 : : return 0;
8300 : : }
8301 : :
8302 : : static void
8303 : 0 : flow_hw_free_vport_actions(struct mlx5_priv *priv)
8304 : : {
8305 : : uint16_t port_id;
8306 : :
8307 [ # # ]: 0 : if (!priv->hw_vport)
8308 : : return;
8309 [ # # ]: 0 : for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
8310 [ # # ]: 0 : if (priv->hw_vport[port_id])
8311 : 0 : mlx5dr_action_destroy(priv->hw_vport[port_id]);
8312 : 0 : mlx5_free(priv->hw_vport);
8313 : 0 : priv->hw_vport = NULL;
8314 : : }
8315 : :
8316 : : static void
8317 : : flow_hw_create_send_to_kernel_actions(struct mlx5_priv *priv __rte_unused)
8318 : : {
8319 : : #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
8320 : : int action_flag;
8321 : : int i;
8322 : : bool is_vf_sf_dev = priv->sh->dev_cap.vf || priv->sh->dev_cap.sf;
8323 : :
8324 : : for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
8325 : : if ((!priv->sh->config.dv_esw_en || is_vf_sf_dev) &&
8326 : : i == MLX5DR_TABLE_TYPE_FDB)
8327 : : continue;
8328 : : action_flag = mlx5_hw_act_flag[1][i];
8329 : : priv->hw_send_to_kernel[i] =
8330 : : mlx5dr_action_create_dest_root(priv->dr_ctx,
8331 : : MLX5_HW_LOWEST_PRIO_ROOT,
8332 : : action_flag);
8333 : : if (!priv->hw_send_to_kernel[i]) {
8334 : : DRV_LOG(WARNING, "Unable to create HWS send to kernel action");
8335 : : return;
8336 : : }
8337 : : }
8338 : : #endif
8339 : : }
8340 : :
8341 : : static void
8342 : 0 : flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv)
8343 : : {
8344 : : int i;
8345 [ # # ]: 0 : for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
8346 [ # # ]: 0 : if (priv->hw_send_to_kernel[i]) {
8347 : 0 : mlx5dr_action_destroy(priv->hw_send_to_kernel[i]);
8348 : 0 : priv->hw_send_to_kernel[i] = NULL;
8349 : : }
8350 : : }
8351 : 0 : }
8352 : :
8353 : : static void
8354 : 0 : flow_hw_destroy_nat64_actions(struct mlx5_priv *priv)
8355 : : {
8356 : : uint32_t i;
8357 : :
8358 [ # # ]: 0 : for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
8359 [ # # ]: 0 : if (priv->action_nat64[i][RTE_FLOW_NAT64_6TO4]) {
8360 : 0 : (void)mlx5dr_action_destroy(priv->action_nat64[i][RTE_FLOW_NAT64_6TO4]);
8361 : 0 : priv->action_nat64[i][RTE_FLOW_NAT64_6TO4] = NULL;
8362 : : }
8363 [ # # ]: 0 : if (priv->action_nat64[i][RTE_FLOW_NAT64_4TO6]) {
8364 : 0 : (void)mlx5dr_action_destroy(priv->action_nat64[i][RTE_FLOW_NAT64_4TO6]);
8365 : 0 : priv->action_nat64[i][RTE_FLOW_NAT64_4TO6] = NULL;
8366 : : }
8367 : : }
8368 : 0 : }
8369 : :
8370 : : static int
8371 : 0 : flow_hw_create_nat64_actions(struct mlx5_priv *priv, struct rte_flow_error *error)
8372 : : {
8373 : : struct mlx5dr_action_nat64_attr attr;
8374 : : uint8_t regs[MLX5_FLOW_NAT64_REGS_MAX];
8375 : : uint32_t i;
8376 : 0 : const uint32_t flags[MLX5DR_TABLE_TYPE_MAX] = {
8377 : : MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED,
8378 : : MLX5DR_ACTION_FLAG_HWS_TX | MLX5DR_ACTION_FLAG_SHARED,
8379 : : MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED,
8380 : : };
8381 : : struct mlx5dr_action *act;
8382 : :
8383 : 0 : attr.registers = regs;
8384 : : /* Try to use 3 registers by default. */
8385 : 0 : attr.num_of_registers = MLX5_FLOW_NAT64_REGS_MAX;
8386 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_NAT64_REGS_MAX; i++) {
8387 : : MLX5_ASSERT(priv->sh->registers.nat64_regs[i] != REG_NON);
8388 : 0 : regs[i] = mlx5_convert_reg_to_field(priv->sh->registers.nat64_regs[i]);
8389 : : }
8390 [ # # ]: 0 : for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
8391 [ # # # # ]: 0 : if (i == MLX5DR_TABLE_TYPE_FDB && !priv->sh->config.dv_esw_en)
8392 : 0 : continue;
8393 : 0 : attr.flags = (enum mlx5dr_action_nat64_flags)
8394 : : (MLX5DR_ACTION_NAT64_V6_TO_V4 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
8395 : 0 : act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
8396 [ # # ]: 0 : if (!act)
8397 : 0 : return rte_flow_error_set(error, rte_errno,
8398 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8399 : : "Failed to create v6 to v4 action.");
8400 : 0 : priv->action_nat64[i][RTE_FLOW_NAT64_6TO4] = act;
8401 : 0 : attr.flags = (enum mlx5dr_action_nat64_flags)
8402 : : (MLX5DR_ACTION_NAT64_V4_TO_V6 | MLX5DR_ACTION_NAT64_BACKUP_ADDR);
8403 : 0 : act = mlx5dr_action_create_nat64(priv->dr_ctx, &attr, flags[i]);
8404 [ # # ]: 0 : if (!act)
8405 : 0 : return rte_flow_error_set(error, rte_errno,
8406 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8407 : : "Failed to create v4 to v6 action.");
8408 : 0 : priv->action_nat64[i][RTE_FLOW_NAT64_4TO6] = act;
8409 : : }
8410 : : return 0;
8411 : : }
8412 : :
8413 : : /**
8414 : : * Create an egress pattern template matching on source SQ.
8415 : : *
8416 : : * @param dev
8417 : : * Pointer to Ethernet device.
8418 : : * @param[out] error
8419 : : * Pointer to error structure.
8420 : : *
8421 : : * @return
8422 : : * Pointer to pattern template on success. NULL otherwise, and rte_errno is set.
8423 : : */
8424 : : static struct rte_flow_pattern_template *
8425 : 0 : flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev, struct rte_flow_error *error)
8426 : : {
8427 : 0 : struct rte_flow_pattern_template_attr attr = {
8428 : : .relaxed_matching = 0,
8429 : : .egress = 1,
8430 : : };
8431 : 0 : struct mlx5_rte_flow_item_sq sq_mask = {
8432 : : .queue = UINT32_MAX,
8433 : : };
8434 : 0 : struct rte_flow_item items[] = {
8435 : : {
8436 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
8437 : : .mask = &sq_mask,
8438 : : },
8439 : : {
8440 : : .type = RTE_FLOW_ITEM_TYPE_END,
8441 : : },
8442 : : };
8443 : :
8444 : 0 : return flow_hw_pattern_template_create(dev, &attr, items, error);
8445 : : }
8446 : :
8447 : : static __rte_always_inline uint32_t
8448 : : flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
8449 : : {
8450 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
8451 : 0 : uint32_t mask = priv->sh->dv_regc0_mask;
8452 : :
8453 : : /* Mask is verified during device initialization. Sanity checking here. */
8454 : : MLX5_ASSERT(mask != 0);
8455 : : /*
8456 : : * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
8457 : : * Sanity checking here.
8458 : : */
8459 : : MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask));
8460 : : return mask;
8461 : : }
8462 : :
8463 : : static __rte_always_inline uint32_t
8464 : : flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
8465 : : {
8466 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
8467 : : uint32_t tag;
8468 : :
8469 : : /* Mask is verified during device initialization. Sanity checking here. */
8470 : : MLX5_ASSERT(priv->vport_meta_mask != 0);
8471 [ # # ]: 0 : tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
8472 : : /*
8473 : : * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
8474 : : * Sanity checking here.
8475 : : */
8476 : : MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
8477 : : return tag;
8478 : : }
8479 : :
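 : : /*
 : :  * Worked example with assumed values: if vport_meta_mask == 0x0000ff00
 : :  * and vport_meta_tag == 0x00002a00, then rte_bsf32(mask) == 8 and the
 : :  * returned tag is 0x2a. The caller writes this value into the unused
 : :  * REG_C_0 bits selected by flow_hw_tx_tag_regc_mask().
 : :  */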
8480 : : static void
8481 : : flow_hw_update_action_mask(struct rte_flow_action *action,
8482 : : struct rte_flow_action *mask,
8483 : : enum rte_flow_action_type type,
8484 : : void *conf_v,
8485 : : void *conf_m)
8486 : : {
8487 : 0 : action->type = type;
8488 : 0 : action->conf = conf_v;
8489 : 0 : mask->type = type;
8490 : 0 : mask->conf = conf_m;
8491 : : }
8492 : :
8493 : : /**
8494 : : * Create an egress actions template with MODIFY_FIELD action for setting unused REG_C_0 bits
8495 : : * to vport tag and JUMP action to group 1.
8496 : : *
8497 : : * If extended metadata mode is enabled, then MODIFY_FIELD action for copying software metadata
8498 : : * to REG_C_1 is added as well.
8499 : : *
8500 : : * @param dev
8501 : : * Pointer to Ethernet device.
8502 : : * @param[out] error
8503 : : * Pointer to error structure.
8504 : : *
8505 : : * @return
8506 : : * Pointer to actions template on success. NULL otherwise, and rte_errno is set.
8507 : : */
8508 : : static struct rte_flow_actions_template *
8509 : 0 : flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev,
8510 : : struct rte_flow_error *error)
8511 : : {
8512 [ # # ]: 0 : uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev);
8513 : 0 : uint32_t tag_value = flow_hw_tx_tag_regc_value(dev);
8514 : 0 : struct rte_flow_actions_template_attr attr = {
8515 : : .egress = 1,
8516 : : };
8517 [ # # ]: 0 : struct rte_flow_action_modify_field set_tag_v = {
8518 : : .operation = RTE_FLOW_MODIFY_SET,
8519 : : .dst = {
8520 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8521 : : .tag_index = REG_C_0,
8522 : : .offset = rte_bsf32(tag_mask),
8523 : : },
8524 : : .src = {
8525 : : .field = RTE_FLOW_FIELD_VALUE,
8526 : : },
8527 : : .width = rte_popcount32(tag_mask),
8528 : : };
8529 : 0 : struct rte_flow_action_modify_field set_tag_m = {
8530 : : .operation = RTE_FLOW_MODIFY_SET,
8531 : : .dst = {
8532 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8533 : : .level = UINT8_MAX,
8534 : : .tag_index = UINT8_MAX,
8535 : : .offset = UINT32_MAX,
8536 : : },
8537 : : .src = {
8538 : : .field = RTE_FLOW_FIELD_VALUE,
8539 : : },
8540 : : .width = UINT32_MAX,
8541 : : };
8542 : 0 : struct rte_flow_action_modify_field copy_metadata_v = {
8543 : : .operation = RTE_FLOW_MODIFY_SET,
8544 : : .dst = {
8545 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8546 : : .tag_index = REG_C_1,
8547 : : },
8548 : : .src = {
8549 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8550 : : .tag_index = REG_A,
8551 : : },
8552 : : .width = 32,
8553 : : };
8554 : 0 : struct rte_flow_action_modify_field copy_metadata_m = {
8555 : : .operation = RTE_FLOW_MODIFY_SET,
8556 : : .dst = {
8557 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8558 : : .level = UINT8_MAX,
8559 : : .tag_index = UINT8_MAX,
8560 : : .offset = UINT32_MAX,
8561 : : },
8562 : : .src = {
8563 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8564 : : .level = UINT8_MAX,
8565 : : .tag_index = UINT8_MAX,
8566 : : .offset = UINT32_MAX,
8567 : : },
8568 : : .width = UINT32_MAX,
8569 : : };
8570 : 0 : struct rte_flow_action_jump jump_v = {
8571 : : .group = MLX5_HW_LOWEST_USABLE_GROUP,
8572 : : };
8573 : 0 : struct rte_flow_action_jump jump_m = {
8574 : : .group = UINT32_MAX,
8575 : : };
8576 : 0 : struct rte_flow_action actions_v[4] = { { 0 } };
8577 [ # # ]: 0 : struct rte_flow_action actions_m[4] = { { 0 } };
8578 : : unsigned int idx = 0;
8579 : :
8580 : : rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
8581 : : rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
8582 : : flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
8583 : : RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8584 : : &set_tag_v, &set_tag_m);
8585 : : idx++;
8586 [ # # ]: 0 : if (MLX5_SH(dev)->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
8587 : : flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
8588 : : RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8589 : : ©_metadata_v, ©_metadata_m);
8590 : : idx++;
8591 : : }
8592 : : flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_JUMP,
8593 : : &jump_v, &jump_m);
8594 : 0 : idx++;
8595 : : flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_END,
8596 : : NULL, NULL);
8597 : : idx++;
8598 : : MLX5_ASSERT(idx <= RTE_DIM(actions_v));
8599 : 0 : return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
8600 : : }
8601 : :
8602 : : static void
8603 : 0 : flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev)
8604 : : {
8605 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
8606 : :
8607 [ # # ]: 0 : if (priv->hw_tx_repr_tagging_tbl) {
8608 : 0 : flow_hw_table_destroy(dev, priv->hw_tx_repr_tagging_tbl, NULL);
8609 : 0 : priv->hw_tx_repr_tagging_tbl = NULL;
8610 : : }
8611 [ # # ]: 0 : if (priv->hw_tx_repr_tagging_at) {
8612 : 0 : flow_hw_actions_template_destroy(dev, priv->hw_tx_repr_tagging_at, NULL);
8613 : 0 : priv->hw_tx_repr_tagging_at = NULL;
8614 : : }
8615 [ # # ]: 0 : if (priv->hw_tx_repr_tagging_pt) {
8616 : 0 : flow_hw_pattern_template_destroy(dev, priv->hw_tx_repr_tagging_pt, NULL);
8617 : 0 : priv->hw_tx_repr_tagging_pt = NULL;
8618 : : }
8619 : 0 : }
8620 : :
8621 : : /**
8622 : : * Set up the templates and table used to create default Tx flow rules. These default
8623 : : * rules allow matching Tx representor traffic using a vport tag placed in unused bits
8624 : : * of the REG_C_0 register.
8625 : : *
8626 : : * @param dev
8627 : : * Pointer to Ethernet device.
8628 : : * @param[out] error
8629 : : * Pointer to error structure.
8630 : : *
8631 : : * @return
8632 : : * 0 on success, negative errno value otherwise.
8633 : : */
8634 : : static int
8635 : 0 : flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev, struct rte_flow_error *error)
8636 : : {
8637 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
8638 : 0 : struct rte_flow_template_table_attr attr = {
8639 : : .flow_attr = {
8640 : : .group = 0,
8641 : : .priority = MLX5_HW_LOWEST_PRIO_ROOT,
8642 : : .egress = 1,
8643 : : },
8644 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
8645 : : };
8646 : 0 : struct mlx5_flow_template_table_cfg cfg = {
8647 : : .attr = attr,
8648 : : .external = false,
8649 : : };
8650 : :
8651 : : MLX5_ASSERT(priv->sh->config.dv_esw_en);
8652 : : MLX5_ASSERT(priv->sh->config.repr_matching);
8653 : 0 : priv->hw_tx_repr_tagging_pt =
8654 : 0 : flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error);
8655 [ # # ]: 0 : if (!priv->hw_tx_repr_tagging_pt)
8656 : 0 : goto err;
8657 : 0 : priv->hw_tx_repr_tagging_at =
8658 : 0 : flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev, error);
8659 [ # # ]: 0 : if (!priv->hw_tx_repr_tagging_at)
8660 : 0 : goto err;
8661 : 0 : priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg,
8662 : : &priv->hw_tx_repr_tagging_pt, 1,
8663 : : &priv->hw_tx_repr_tagging_at, 1,
8664 : : error);
8665 [ # # ]: 0 : if (!priv->hw_tx_repr_tagging_tbl)
8666 : 0 : goto err;
8667 : : return 0;
8668 : 0 : err:
8669 : 0 : flow_hw_cleanup_tx_repr_tagging(dev);
8670 : 0 : return -rte_errno;
8671 : : }
8672 : :
8673 : : static uint32_t
8674 : : flow_hw_esw_mgr_regc_marker_mask(struct rte_eth_dev *dev)
8675 : : {
8676 : 0 : uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
8677 : :
8678 : : /* Mask is verified during device initialization. */
8679 : : MLX5_ASSERT(mask != 0);
8680 : : return mask;
8681 : : }
8682 : :
8683 : : static uint32_t
8684 : : flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev)
8685 : : {
8686 : 0 : uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
8687 : :
8688 : : /* Mask is verified during device initialization. */
8689 : : MLX5_ASSERT(mask != 0);
8690 : 0 : return RTE_BIT32(rte_bsf32(mask));
8691 : : }
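 : : /*
 : :  * Worked example (the mask below is hypothetical; the real value is
 : :  * probed from the device during initialization): with
 : :  * dv_regc0_mask == 0x00ff0000, rte_bsf32(mask) == 16, so the marker is
 : :  * RTE_BIT32(16) == 0x00010000, i.e. the least significant usable bit
 : :  * of REG_C_0.
 : :  */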
8692 : :
8693 : : /**
8694 : : * Creates a flow pattern template used to match on E-Switch Manager.
8695 : : * This template is used to set up a table for SQ miss default flow.
8696 : : *
8697 : : * @param dev
8698 : : * Pointer to Ethernet device.
8699 : : * @param error
8700 : : * Pointer to error structure.
8701 : : *
8702 : : * @return
8703 : : * Pointer to flow pattern template on success, NULL otherwise.
8704 : : */
8705 : : static struct rte_flow_pattern_template *
8706 : 0 : flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev,
8707 : : struct rte_flow_error *error)
8708 : : {
8709 : 0 : struct rte_flow_pattern_template_attr attr = {
8710 : : .relaxed_matching = 0,
8711 : : .transfer = 1,
8712 : : };
8713 : 0 : struct rte_flow_item_ethdev port_spec = {
8714 : : .port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
8715 : : };
8716 : 0 : struct rte_flow_item_ethdev port_mask = {
8717 : : .port_id = UINT16_MAX,
8718 : : };
8719 : 0 : struct mlx5_rte_flow_item_sq sq_mask = {
8720 : : .queue = UINT32_MAX,
8721 : : };
8722 : 0 : struct rte_flow_item items[] = {
8723 : : {
8724 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
8725 : : .spec = &port_spec,
8726 : : .mask = &port_mask,
8727 : : },
8728 : : {
8729 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
8730 : : .mask = &sq_mask,
8731 : : },
8732 : : {
8733 : : .type = RTE_FLOW_ITEM_TYPE_END,
8734 : : },
8735 : : };
8736 : :
8737 : 0 : return flow_hw_pattern_template_create(dev, &attr, items, error);
8738 : : }
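 : : /*
 : :  * Note: MLX5_RTE_FLOW_ITEM_TYPE_SQ is a PMD-internal item type (hence
 : :  * the cast from the private enum) which matches on the send queue
 : :  * number; it is not part of the public rte_flow item set.
 : :  */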
8739 : :
8740 : : /**
8741 : : * Creates a flow pattern template used to match REG_C_0 and a SQ.
8742 : : * Matching on REG_C_0 is set up to match on all bits usable by user-space.
8743 : : * If traffic was sent from E-Switch Manager, then all usable bits will be set to 0,
8744 : : * except the least significant bit, which will be set to 1.
8745 : : *
8746 : : * This template is used to set up a table for SQ miss default flow.
8747 : : *
8748 : : * @param dev
8749 : : * Pointer to Ethernet device.
8750 : : * @param error
8751 : : * Pointer to error structure.
8752 : : *
8753 : : * @return
8754 : : * Pointer to flow pattern template on success, NULL otherwise.
8755 : : */
8756 : : static struct rte_flow_pattern_template *
8757 : 0 : flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev,
8758 : : struct rte_flow_error *error)
8759 : : {
8760 : 0 : struct rte_flow_pattern_template_attr attr = {
8761 : : .relaxed_matching = 0,
8762 : : .transfer = 1,
8763 : : };
8764 : 0 : struct rte_flow_item_tag reg_c0_spec = {
8765 : : .index = (uint8_t)REG_C_0,
8766 : : };
8767 : 0 : struct rte_flow_item_tag reg_c0_mask = {
8768 : : .index = 0xff,
8769 : : .data = flow_hw_esw_mgr_regc_marker_mask(dev),
8770 : : };
8771 : 0 : struct mlx5_rte_flow_item_sq queue_mask = {
8772 : : .queue = UINT32_MAX,
8773 : : };
8774 : 0 : struct rte_flow_item items[] = {
8775 : : {
8776 : : .type = (enum rte_flow_item_type)
8777 : : MLX5_RTE_FLOW_ITEM_TYPE_TAG,
8778 : : .spec = &reg_c0_spec,
8779 : : .mask = &reg_c0_mask,
8780 : : },
8781 : : {
8782 : : .type = (enum rte_flow_item_type)
8783 : : MLX5_RTE_FLOW_ITEM_TYPE_SQ,
8784 : : .mask = &queue_mask,
8785 : : },
8786 : : {
8787 : : .type = RTE_FLOW_ITEM_TYPE_END,
8788 : : },
8789 : : };
8790 : :
8791 : 0 : return flow_hw_pattern_template_create(dev, &attr, items, error);
8792 : : }
8793 : :
8794 : : /**
8795 : : * Creates a flow pattern template with unmasked represented port matching.
8796 : : * This template is used to set up a table for default transfer flows
8797 : : * directing packets to group 1.
8798 : : *
8799 : : * @param dev
8800 : : * Pointer to Ethernet device.
8801 : : * @param error
8802 : : * Pointer to error structure.
8803 : : *
8804 : : * @return
8805 : : * Pointer to flow pattern template on success, NULL otherwise.
8806 : : */
8807 : : static struct rte_flow_pattern_template *
8808 : 0 : flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev,
8809 : : struct rte_flow_error *error)
8810 : : {
8811 : 0 : struct rte_flow_pattern_template_attr attr = {
8812 : : .relaxed_matching = 0,
8813 : : .transfer = 1,
8814 : : };
8815 : 0 : struct rte_flow_item_ethdev port_mask = {
8816 : : .port_id = UINT16_MAX,
8817 : : };
8818 : 0 : struct rte_flow_item items[] = {
8819 : : {
8820 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
8821 : : .mask = &port_mask,
8822 : : },
8823 : : {
8824 : : .type = RTE_FLOW_ITEM_TYPE_END,
8825 : : },
8826 : : };
8827 : :
8828 : 0 : return flow_hw_pattern_template_create(dev, &attr, items, error);
8829 : : }
8830 : :
8831 : : /*
8832 : : * Creates a flow pattern template matching all Ethernet packets.
8833 : : * This template is used to set up a table for default Tx copy (Tx metadata
8834 : : * to REG_C_1) flow rule usage.
8835 : : *
8836 : : * @param dev
8837 : : * Pointer to Ethernet device.
8838 : : * @param error
8839 : : * Pointer to error structure.
8840 : : *
8841 : : * @return
8842 : : * Pointer to flow pattern template on success, NULL otherwise.
8843 : : */
8844 : : static struct rte_flow_pattern_template *
8845 : 0 : flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev,
8846 : : struct rte_flow_error *error)
8847 : : {
8848 : 0 : struct rte_flow_pattern_template_attr tx_pa_attr = {
8849 : : .relaxed_matching = 0,
8850 : : .egress = 1,
8851 : : };
8852 : 0 : struct rte_flow_item_eth promisc = {
8853 : : .hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8854 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8855 : : .hdr.ether_type = 0,
8856 : : };
8857 : 0 : struct rte_flow_item eth_all[] = {
8858 : : [0] = {
8859 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
8860 : : .spec = &promisc,
8861 : : .mask = &promisc,
8862 : : },
8863 : : [1] = {
8864 : : .type = RTE_FLOW_ITEM_TYPE_END,
8865 : : },
8866 : : };
8867 : :
8868 : 0 : return flow_hw_pattern_template_create(dev, &tx_pa_attr, eth_all, error);
8869 : : }
8870 : :
8871 : : /*
8872 : : * Creates a flow pattern template matching all LACP packets, for the NIC
8873 : : * ingress domain only.
8874 : : *
8875 : : * @param dev
8876 : : * Pointer to Ethernet device.
8877 : : * @param error
8878 : : * Pointer to error structure.
8879 : : *
8880 : : * @return
8881 : : * Pointer to flow pattern template on success, NULL otherwise.
8882 : : */
8883 : : static struct rte_flow_pattern_template *
8884 : 0 : flow_hw_create_lacp_rx_pattern_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
8885 : : {
8886 : 0 : struct rte_flow_pattern_template_attr pa_attr = {
8887 : : .relaxed_matching = 0,
8888 : : .ingress = 1,
8889 : : };
8890 : 0 : struct rte_flow_item_eth lacp_mask = {
8891 : : .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8892 : : .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
8893 : : .type = 0xFFFF,
8894 : : };
8895 : 0 : struct rte_flow_item eth_all[] = {
8896 : : [0] = {
8897 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
8898 : : .mask = &lacp_mask,
8899 : : },
8900 : : [1] = {
8901 : : .type = RTE_FLOW_ITEM_TYPE_END,
8902 : : },
8903 : : };
8904 : 0 : return flow_hw_pattern_template_create(dev, &pa_attr, eth_all, error);
8905 : : }
8906 : :
8907 : : /**
8908 : : * Creates a flow actions template with modify field action and masked jump action.
8909 : : * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)
8910 : : * to 1, meaning that packet was originated from E-Switch Manager. Jump action
8911 : : * transfers steering to group 1.
8912 : : *
8913 : : * @param dev
8914 : : * Pointer to Ethernet device.
8915 : : * @param error
8916 : : * Pointer to error structure.
8917 : : *
8918 : : * @return
8919 : : * Pointer to flow actions template on success, NULL otherwise.
8920 : : */
8921 : : static struct rte_flow_actions_template *
8922 : 0 : flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev,
8923 : : struct rte_flow_error *error)
8924 : : {
8925 [ # # ]: 0 : uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev);
8926 : 0 : uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev);
8927 : 0 : struct rte_flow_actions_template_attr attr = {
8928 : : .transfer = 1,
8929 : : };
8930 [ # # ]: 0 : struct rte_flow_action_modify_field set_reg_v = {
8931 : : .operation = RTE_FLOW_MODIFY_SET,
8932 : : .dst = {
8933 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8934 : : .tag_index = REG_C_0,
8935 : : },
8936 : : .src = {
8937 : : .field = RTE_FLOW_FIELD_VALUE,
8938 : : },
8939 : : .width = rte_popcount32(marker_mask),
8940 : : };
8941 : 0 : struct rte_flow_action_modify_field set_reg_m = {
8942 : : .operation = RTE_FLOW_MODIFY_SET,
8943 : : .dst = {
8944 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
8945 : : .level = UINT8_MAX,
8946 : : .tag_index = UINT8_MAX,
8947 : : .offset = UINT32_MAX,
8948 : : },
8949 : : .src = {
8950 : : .field = RTE_FLOW_FIELD_VALUE,
8951 : : },
8952 : : .width = UINT32_MAX,
8953 : : };
8954 : 0 : struct rte_flow_action_jump jump_v = {
8955 : : .group = MLX5_HW_LOWEST_USABLE_GROUP,
8956 : : };
8957 : 0 : struct rte_flow_action_jump jump_m = {
8958 : : .group = UINT32_MAX,
8959 : : };
8960 : 0 : struct rte_flow_action actions_v[] = {
8961 : : {
8962 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8963 : : .conf = &set_reg_v,
8964 : : },
8965 : : {
8966 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
8967 : : .conf = &jump_v,
8968 : : },
8969 : : {
8970 : : .type = RTE_FLOW_ACTION_TYPE_END,
8971 : : }
8972 : : };
8973 : 0 : struct rte_flow_action actions_m[] = {
8974 : : {
8975 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
8976 : : .conf = &set_reg_m,
8977 : : },
8978 : : {
8979 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
8980 : : .conf = &jump_m,
8981 : : },
8982 : : {
8983 : : .type = RTE_FLOW_ACTION_TYPE_END,
8984 : : }
8985 : : };
8986 : :
8987 [ # # ]: 0 : set_reg_v.dst.offset = rte_bsf32(marker_mask);
8988 : : rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
8989 : : rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
8990 : 0 : return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
8991 : : }
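 : : /*
 : :  * Continuing the hypothetical dv_regc0_mask == 0x00ff0000 example:
 : :  * width == rte_popcount32(mask) == 8 and dst.offset == rte_bsf32(mask)
 : :  * == 16, so the MODIFY_FIELD action writes the marker into the 8 usable
 : :  * bits of REG_C_0 (bits 16..23), leaving the other bits untouched.
 : :  */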
8992 : :
8993 : : /**
8994 : : * Creates a flow actions template with an unmasked JUMP action. Flows
8995 : : * based on this template will perform a jump to some group. This template
8996 : : * is used to set up tables for control flows.
8997 : : *
8998 : : * @param dev
8999 : : * Pointer to Ethernet device.
9000 : : * @param group
9001 : : * Destination group for this action template.
9002 : : * @param error
9003 : : * Pointer to error structure.
9004 : : *
9005 : : * @return
9006 : : * Pointer to flow actions template on success, NULL otherwise.
9007 : : */
9008 : : static struct rte_flow_actions_template *
9009 : 0 : flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
9010 : : uint32_t group,
9011 : : struct rte_flow_error *error)
9012 : : {
9013 : 0 : struct rte_flow_actions_template_attr attr = {
9014 : : .transfer = 1,
9015 : : };
9016 : 0 : struct rte_flow_action_jump jump_v = {
9017 : : .group = group,
9018 : : };
9019 : 0 : struct rte_flow_action_jump jump_m = {
9020 : : .group = UINT32_MAX,
9021 : : };
9022 : 0 : struct rte_flow_action actions_v[] = {
9023 : : {
9024 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
9025 : : .conf = &jump_v,
9026 : : },
9027 : : {
9028 : : .type = RTE_FLOW_ACTION_TYPE_END,
9029 : : }
9030 : : };
9031 : 0 : struct rte_flow_action actions_m[] = {
9032 : : {
9033 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
9034 : : .conf = &jump_m,
9035 : : },
9036 : : {
9037 : : .type = RTE_FLOW_ACTION_TYPE_END,
9038 : : }
9039 : : };
9040 : :
9041 : 0 : return flow_hw_actions_template_create(dev, &attr, actions_v,
9042 : : actions_m, error);
9043 : : }
9044 : :
9045 : : /**
9046 : : * Creates a flow actions template with an unmasked REPRESENTED_PORT action.
9047 : : * It is used to create control flow tables.
9048 : : *
9049 : : * @param dev
9050 : : * Pointer to Ethernet device.
9051 : : * @param error
9052 : : * Pointer to error structure.
9053 : : *
9054 : : * @return
9055 : : * Pointer to flow actions template on success, NULL otherwise.
9056 : : */
9057 : : static struct rte_flow_actions_template *
9058 : 0 : flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev,
9059 : : struct rte_flow_error *error)
9060 : : {
9061 : 0 : struct rte_flow_actions_template_attr attr = {
9062 : : .transfer = 1,
9063 : : };
9064 : 0 : struct rte_flow_action_ethdev port_v = {
9065 : : .port_id = 0,
9066 : : };
9067 : 0 : struct rte_flow_action actions_v[] = {
9068 : : {
9069 : : .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
9070 : : .conf = &port_v,
9071 : : },
9072 : : {
9073 : : .type = RTE_FLOW_ACTION_TYPE_END,
9074 : : }
9075 : : };
9076 : 0 : struct rte_flow_action_ethdev port_m = {
9077 : : .port_id = 0,
9078 : : };
9079 : 0 : struct rte_flow_action actions_m[] = {
9080 : : {
9081 : : .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
9082 : : .conf = &port_m,
9083 : : },
9084 : : {
9085 : : .type = RTE_FLOW_ACTION_TYPE_END,
9086 : : }
9087 : : };
9088 : :
9089 : 0 : return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
9090 : : }
9091 : :
9092 : : /*
9093 : : * Creates an actions template that uses a header modify action for register
9094 : : * copying. This template is used to set up a table for the copy flow.
9095 : : *
9096 : : * @param dev
9097 : : * Pointer to Ethernet device.
9098 : : * @param error
9099 : : * Pointer to error structure.
9100 : : *
9101 : : * @return
9102 : : * Pointer to flow actions template on success, NULL otherwise.
9103 : : */
9104 : : static struct rte_flow_actions_template *
9105 : 0 : flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev,
9106 : : struct rte_flow_error *error)
9107 : : {
9108 : 0 : struct rte_flow_actions_template_attr tx_act_attr = {
9109 : : .egress = 1,
9110 : : };
9111 : 0 : const struct rte_flow_action_modify_field mreg_action = {
9112 : : .operation = RTE_FLOW_MODIFY_SET,
9113 : : .dst = {
9114 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9115 : : .tag_index = REG_C_1,
9116 : : },
9117 : : .src = {
9118 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9119 : : .tag_index = REG_A,
9120 : : },
9121 : : .width = 32,
9122 : : };
9123 : 0 : const struct rte_flow_action_modify_field mreg_mask = {
9124 : : .operation = RTE_FLOW_MODIFY_SET,
9125 : : .dst = {
9126 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9127 : : .level = UINT8_MAX,
9128 : : .tag_index = UINT8_MAX,
9129 : : .offset = UINT32_MAX,
9130 : : },
9131 : : .src = {
9132 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
9133 : : .level = UINT8_MAX,
9134 : : .tag_index = UINT8_MAX,
9135 : : .offset = UINT32_MAX,
9136 : : },
9137 : : .width = UINT32_MAX,
9138 : : };
9139 : 0 : const struct rte_flow_action_jump jump_action = {
9140 : : .group = 1,
9141 : : };
9142 : 0 : const struct rte_flow_action_jump jump_mask = {
9143 : : .group = UINT32_MAX,
9144 : : };
9145 : 0 : const struct rte_flow_action actions[] = {
9146 : : [0] = {
9147 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
9148 : : .conf = &mreg_action,
9149 : : },
9150 : : [1] = {
9151 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
9152 : : .conf = &jump_action,
9153 : : },
9154 : : [2] = {
9155 : : .type = RTE_FLOW_ACTION_TYPE_END,
9156 : : },
9157 : : };
9158 : 0 : const struct rte_flow_action masks[] = {
9159 : : [0] = {
9160 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
9161 : : .conf = &mreg_mask,
9162 : : },
9163 : : [1] = {
9164 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
9165 : : .conf = &jump_mask,
9166 : : },
9167 : : [2] = {
9168 : : .type = RTE_FLOW_ACTION_TYPE_END,
9169 : : },
9170 : : };
9171 : :
9172 : 0 : return flow_hw_actions_template_create(dev, &tx_act_attr, actions,
9173 : : masks, error);
9174 : : }
9175 : :
9176 : : /*
9177 : : * Creates an actions template that uses the default miss action to re-route
9178 : : * packets to the kernel driver stack.
9179 : : * On the root table, only the DEFAULT_MISS action can be used.
9180 : : *
9181 : : * @param dev
9182 : : * Pointer to Ethernet device.
9183 : : * @param error
9184 : : * Pointer to error structure.
9185 : : *
9186 : : * @return
9187 : : * Pointer to flow actions template on success, NULL otherwise.
9188 : : */
9189 : : static struct rte_flow_actions_template *
9190 : 0 : flow_hw_create_lacp_rx_actions_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
9191 : : {
9192 : 0 : struct rte_flow_actions_template_attr act_attr = {
9193 : : .ingress = 1,
9194 : : };
9195 : 0 : const struct rte_flow_action actions[] = {
9196 : : [0] = {
9197 : : .type = (enum rte_flow_action_type)
9198 : : MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
9199 : : },
9200 : : [1] = {
9201 : : .type = RTE_FLOW_ACTION_TYPE_END,
9202 : : },
9203 : : };
9204 : :
9205 : 0 : return flow_hw_actions_template_create(dev, &act_attr, actions, actions, error);
9206 : : }
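 : : /*
 : :  * Note: the same "actions" array is passed as both the values and the
 : :  * masks. This is valid here because DEFAULT_MISS carries no
 : :  * configuration, so the action is effectively fully masked.
 : :  */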
9207 : :
9208 : : /**
9209 : : * Creates a control flow table used to transfer traffic from E-Switch Manager
9210 : : * and TX queues from group 0 to group 1.
9211 : : *
9212 : : * @param dev
9213 : : * Pointer to Ethernet device.
9214 : : * @param it
9215 : : * Pointer to flow pattern template.
9216 : : * @param at
9217 : : * Pointer to flow actions template.
9218 : : * @param error
9219 : : * Pointer to error structure.
9220 : : *
9221 : : * @return
9222 : : * Pointer to flow table on success, NULL otherwise.
9223 : : */
9224 : : static struct rte_flow_template_table*
9225 : 0 : flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
9226 : : struct rte_flow_pattern_template *it,
9227 : : struct rte_flow_actions_template *at,
9228 : : struct rte_flow_error *error)
9229 : : {
9230 : 0 : struct rte_flow_template_table_attr attr = {
9231 : : .flow_attr = {
9232 : : .group = 0,
9233 : : .priority = MLX5_HW_LOWEST_PRIO_ROOT,
9234 : : .ingress = 0,
9235 : : .egress = 0,
9236 : : .transfer = 1,
9237 : : },
9238 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
9239 : : };
9240 : 0 : struct mlx5_flow_template_table_cfg cfg = {
9241 : : .attr = attr,
9242 : : .external = false,
9243 : : };
9244 : :
9245 : 0 : return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
9246 : : }
9247 : :
9248 : :
9249 : : /**
9250 : : * Creates a non-root control flow table (group 1) used to forward default SQ
9251 : : * miss traffic, matched on the REG_C_0 marker and an SQ number, to the
9251 : : * destination represented port.
9252 : : *
9253 : : * @param dev
9254 : : * Pointer to Ethernet device.
9255 : : * @param it
9256 : : * Pointer to flow pattern template.
9257 : : * @param at
9258 : : * Pointer to flow actions template.
9259 : : * @param error
9260 : : * Pointer to error structure.
9261 : : *
9262 : : * @return
9263 : : * Pointer to flow table on success, NULL otherwise.
9264 : : */
9265 : : static struct rte_flow_template_table*
9266 : 0 : flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
9267 : : struct rte_flow_pattern_template *it,
9268 : : struct rte_flow_actions_template *at,
9269 : : struct rte_flow_error *error)
9270 : : {
9271 : 0 : struct rte_flow_template_table_attr attr = {
9272 : : .flow_attr = {
9273 : : .group = 1,
9274 : : .priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,
9275 : : .ingress = 0,
9276 : : .egress = 0,
9277 : : .transfer = 1,
9278 : : },
9279 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
9280 : : };
9281 : 0 : struct mlx5_flow_template_table_cfg cfg = {
9282 : : .attr = attr,
9283 : : .external = false,
9284 : : };
9285 : :
9286 : 0 : return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
9287 : : }
9288 : :
9289 : : /*
9290 : : * Creates the default Tx metadata copy table on NIC Tx group 0.
9291 : : *
9292 : : * @param dev
9293 : : * Pointer to Ethernet device.
9294 : : * @param pt
9295 : : * Pointer to flow pattern template.
9296 : : * @param at
9297 : : * Pointer to flow actions template.
9298 : : * @param error
9299 : : * Pointer to error structure.
9300 : : *
9301 : : * @return
9302 : : * Pointer to flow table on success, NULL otherwise.
9303 : : */
9304 : : static struct rte_flow_template_table*
9305 : 0 : flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
9306 : : struct rte_flow_pattern_template *pt,
9307 : : struct rte_flow_actions_template *at,
9308 : : struct rte_flow_error *error)
9309 : : {
9310 : 0 : struct rte_flow_template_table_attr tx_tbl_attr = {
9311 : : .flow_attr = {
9312 : : .group = 0, /* Root */
9313 : : .priority = MLX5_HW_LOWEST_PRIO_ROOT,
9314 : : .egress = 1,
9315 : : },
9316 : : .nb_flows = 1, /* One default flow rule for all. */
9317 : : };
9318 : 0 : struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
9319 : : .attr = tx_tbl_attr,
9320 : : .external = false,
9321 : : };
9322 : :
9323 : 0 : return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, error);
9324 : : }
9325 : :
9326 : : /**
9327 : : * Creates a control flow table used to transfer traffic
9328 : : * from group 0 to group 1.
9329 : : *
9330 : : * @param dev
9331 : : * Pointer to Ethernet device.
9332 : : * @param it
9333 : : * Pointer to flow pattern template.
9334 : : * @param at
9335 : : * Pointer to flow actions template.
9336 : : * @param error
9337 : : * Pointer to error structure.
9338 : : *
9339 : : * @return
9340 : : * Pointer to flow table on success, NULL otherwise.
9341 : : */
9342 : : static struct rte_flow_template_table *
9343 : 0 : flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
9344 : : struct rte_flow_pattern_template *it,
9345 : : struct rte_flow_actions_template *at,
9346 : : struct rte_flow_error *error)
9347 : : {
9348 : 0 : struct rte_flow_template_table_attr attr = {
9349 : : .flow_attr = {
9350 : : .group = 0,
9351 : : .priority = 0,
9352 : : .ingress = 0,
9353 : : .egress = 0,
9354 : : .transfer = 1,
9355 : : },
9356 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
9357 : : };
9358 : 0 : struct mlx5_flow_template_table_cfg cfg = {
9359 : : .attr = attr,
9360 : : .external = false,
9361 : : };
9362 : :
9363 : 0 : return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
9364 : : }
9365 : :
9366 : : /**
9367 : : * Cleans up all template tables and pattern, and actions templates used for
9368 : : * FDB control flow rules.
9369 : : *
9370 : : * @param dev
9371 : : * Pointer to Ethernet device.
9372 : : */
9373 : : static void
9374 : 0 : flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev)
9375 : : {
9376 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9377 : : struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
9378 : :
9379 [ # # ]: 0 : if (!priv->hw_ctrl_fdb)
9380 : : return;
9381 : : hw_ctrl_fdb = priv->hw_ctrl_fdb;
9382 : : /* Clean up templates used for LACP default miss table. */
9383 [ # # ]: 0 : if (hw_ctrl_fdb->hw_lacp_rx_tbl)
9384 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL));
9385 [ # # ]: 0 : if (hw_ctrl_fdb->lacp_rx_actions_tmpl)
9386 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl,
9387 : : NULL));
9388 [ # # ]: 0 : if (hw_ctrl_fdb->lacp_rx_items_tmpl)
9389 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
9390 : : NULL));
9391 : : /* Clean up templates used for default Tx metadata copy. */
9392 [ # # ]: 0 : if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
9393 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL));
9394 [ # # ]: 0 : if (hw_ctrl_fdb->tx_meta_actions_tmpl)
9395 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl,
9396 : : NULL));
9397 [ # # ]: 0 : if (hw_ctrl_fdb->tx_meta_items_tmpl)
9398 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl,
9399 : : NULL));
9400 : : /* Clean up templates used for default FDB jump rule. */
9401 [ # # ]: 0 : if (hw_ctrl_fdb->hw_esw_zero_tbl)
9402 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL));
9403 [ # # ]: 0 : if (hw_ctrl_fdb->jump_one_actions_tmpl)
9404 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl,
9405 : : NULL));
9406 [ # # ]: 0 : if (hw_ctrl_fdb->port_items_tmpl)
9407 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl,
9408 : : NULL));
9409 : : /* Clean up templates used for default SQ miss flow rules - non-root table. */
9410 [ # # ]: 0 : if (hw_ctrl_fdb->hw_esw_sq_miss_tbl)
9411 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL));
9412 [ # # ]: 0 : if (hw_ctrl_fdb->regc_sq_items_tmpl)
9413 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
9414 : : NULL));
9415 [ # # ]: 0 : if (hw_ctrl_fdb->port_actions_tmpl)
9416 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl,
9417 : : NULL));
9418 : : /* Clean up templates used for default SQ miss flow rules - root table. */
9419 [ # # ]: 0 : if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl)
9420 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL));
9421 [ # # ]: 0 : if (hw_ctrl_fdb->regc_jump_actions_tmpl)
9422 : 0 : claim_zero(flow_hw_actions_template_destroy(dev,
9423 : : hw_ctrl_fdb->regc_jump_actions_tmpl, NULL));
9424 [ # # ]: 0 : if (hw_ctrl_fdb->esw_mgr_items_tmpl)
9425 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
9426 : : NULL));
9427 : : /* Clean up templates structure for FDB control flow rules. */
9428 : 0 : mlx5_free(hw_ctrl_fdb);
9429 : 0 : priv->hw_ctrl_fdb = NULL;
9430 : : }
9431 : :
9432 : : /*
9433 : : * Creates a table on the root group for redirecting the LACP traffic.
9434 : : *
9435 : : * @param dev
9436 : : * Pointer to Ethernet device.
9437 : : * @param it
9438 : : * Pointer to flow pattern template.
9439 : : * @param at
9440 : : * Pointer to flow actions template.
9441 : : *
9442 : : * @return
9443 : : * Pointer to flow table on success, NULL otherwise.
9444 : : */
9445 : : static struct rte_flow_template_table *
9446 : 0 : flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev,
9447 : : struct rte_flow_pattern_template *it,
9448 : : struct rte_flow_actions_template *at,
9449 : : struct rte_flow_error *error)
9450 : : {
9451 : 0 : struct rte_flow_template_table_attr attr = {
9452 : : .flow_attr = {
9453 : : .group = 0,
9454 : : .priority = 0,
9455 : : .ingress = 1,
9456 : : .egress = 0,
9457 : : .transfer = 0,
9458 : : },
9459 : : .nb_flows = 1,
9460 : : };
9461 : 0 : struct mlx5_flow_template_table_cfg cfg = {
9462 : : .attr = attr,
9463 : : .external = false,
9464 : : };
9465 : :
9466 : 0 : return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
9467 : : }
9468 : :
9469 : : /**
9470 : : * Creates a set of flow tables used to create control flows
9471 : : * when E-Switch is engaged.
9472 : : *
9473 : : * @param dev
9474 : : * Pointer to Ethernet device.
9475 : : * @param error
9476 : : * Pointer to error structure.
9477 : : *
9478 : : * @return
9479 : : * 0 on success, negative values otherwise
9480 : : */
9481 : : static int
9482 : 0 : flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
9483 : : {
9484 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9485 : : struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
9486 : 0 : uint32_t xmeta = priv->sh->config.dv_xmeta_en;
9487 : 0 : uint32_t repr_matching = priv->sh->config.repr_matching;
9488 : :
9489 : : MLX5_ASSERT(priv->hw_ctrl_fdb == NULL);
9490 : 0 : hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY);
9491 [ # # ]: 0 : if (!hw_ctrl_fdb) {
9492 : 0 : DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates",
9493 : : dev->data->port_id);
9494 : 0 : rte_errno = ENOMEM;
9495 : 0 : goto err;
9496 : : }
9497 : 0 : priv->hw_ctrl_fdb = hw_ctrl_fdb;
9498 : : /* Create templates and table for default SQ miss flow rules - root table. */
9499 : 0 : hw_ctrl_fdb->esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
9500 [ # # ]: 0 : if (!hw_ctrl_fdb->esw_mgr_items_tmpl) {
9501 : 0 : DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
9502 : : " template for control flows", dev->data->port_id);
9503 : 0 : goto err;
9504 : : }
9505 : 0 : hw_ctrl_fdb->regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template
9506 : : (dev, error);
9507 [ # # ]: 0 : if (!hw_ctrl_fdb->regc_jump_actions_tmpl) {
9508 : 0 : DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
9509 : : " for control flows", dev->data->port_id);
9510 : 0 : goto err;
9511 : : }
9512 : 0 : hw_ctrl_fdb->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table
9513 : : (dev, hw_ctrl_fdb->esw_mgr_items_tmpl, hw_ctrl_fdb->regc_jump_actions_tmpl,
9514 : : error);
9515 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) {
9516 : 0 : DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
9517 : : " for control flows", dev->data->port_id);
9518 : 0 : goto err;
9519 : : }
9520 : : /* Create templates and table for default SQ miss flow rules - non-root table. */
9521 : 0 : hw_ctrl_fdb->regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
9522 [ # # ]: 0 : if (!hw_ctrl_fdb->regc_sq_items_tmpl) {
9523 : 0 : DRV_LOG(ERR, "port %u failed to create SQ item template for"
9524 : : " control flows", dev->data->port_id);
9525 : 0 : goto err;
9526 : : }
9527 : 0 : hw_ctrl_fdb->port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error);
9528 [ # # ]: 0 : if (!hw_ctrl_fdb->port_actions_tmpl) {
9529 : 0 : DRV_LOG(ERR, "port %u failed to create port action template"
9530 : : " for control flows", dev->data->port_id);
9531 : 0 : goto err;
9532 : : }
9533 : 0 : hw_ctrl_fdb->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table
9534 : : (dev, hw_ctrl_fdb->regc_sq_items_tmpl, hw_ctrl_fdb->port_actions_tmpl,
9535 : : error);
9536 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
9537 : 0 : DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
9538 : : " for control flows", dev->data->port_id);
9539 : 0 : goto err;
9540 : : }
9541 : : /* Create templates and table for default FDB jump flow rules. */
9542 : 0 : hw_ctrl_fdb->port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error);
9543 [ # # ]: 0 : if (!hw_ctrl_fdb->port_items_tmpl) {
9544 : 0 : DRV_LOG(ERR, "port %u failed to create represented port item template for"
9545 : : " control flows", dev->data->port_id);
9546 : 0 : goto err;
9547 : : }
9548 : 0 : hw_ctrl_fdb->jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template
9549 : : (dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
9550 [ # # ]: 0 : if (!hw_ctrl_fdb->jump_one_actions_tmpl) {
9551 : 0 : DRV_LOG(ERR, "port %u failed to create jump action template"
9552 : : " for control flows", dev->data->port_id);
9553 : 0 : goto err;
9554 : : }
9555 : 0 : hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table
9556 : : (dev, hw_ctrl_fdb->port_items_tmpl, hw_ctrl_fdb->jump_one_actions_tmpl,
9557 : : error);
9558 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_esw_zero_tbl) {
9559 : 0 : DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
9560 : : " for control flows", dev->data->port_id);
9561 : 0 : goto err;
9562 : : }
9563 : : /* Create templates and table for default Tx metadata copy flow rule. */
9564 [ # # ]: 0 : if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) {
9565 : 0 : hw_ctrl_fdb->tx_meta_items_tmpl =
9566 : 0 : flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error);
9567 [ # # ]: 0 : if (!hw_ctrl_fdb->tx_meta_items_tmpl) {
9568 : 0 : DRV_LOG(ERR, "port %u failed to create Tx metadata copy pattern"
9569 : : " template for control flows", dev->data->port_id);
9570 : 0 : goto err;
9571 : : }
9572 : 0 : hw_ctrl_fdb->tx_meta_actions_tmpl =
9573 : 0 : flow_hw_create_tx_default_mreg_copy_actions_template(dev, error);
9574 [ # # ]: 0 : if (!hw_ctrl_fdb->tx_meta_actions_tmpl) {
9575 : 0 : DRV_LOG(ERR, "port %u failed to create Tx metadata copy actions"
9576 : : " template for control flows", dev->data->port_id);
9577 : 0 : goto err;
9578 : : }
9579 : 0 : hw_ctrl_fdb->hw_tx_meta_cpy_tbl =
9580 : 0 : flow_hw_create_tx_default_mreg_copy_table
9581 : : (dev, hw_ctrl_fdb->tx_meta_items_tmpl,
9582 : : hw_ctrl_fdb->tx_meta_actions_tmpl, error);
9583 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) {
9584 : 0 : DRV_LOG(ERR, "port %u failed to create table for default"
9585 : : " Tx metadata copy flow rule", dev->data->port_id);
9586 : 0 : goto err;
9587 : : }
9588 : : }
9589 : : /* Create LACP default miss table. */
9590 [ # # # # : 0 : if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) {
# # ]
9591 : 0 : hw_ctrl_fdb->lacp_rx_items_tmpl =
9592 : 0 : flow_hw_create_lacp_rx_pattern_template(dev, error);
9593 [ # # ]: 0 : if (!hw_ctrl_fdb->lacp_rx_items_tmpl) {
9594 : 0 : DRV_LOG(ERR, "port %u failed to create pattern template"
9595 : : " for LACP Rx traffic", dev->data->port_id);
9596 : 0 : goto err;
9597 : : }
9598 : 0 : hw_ctrl_fdb->lacp_rx_actions_tmpl =
9599 : 0 : flow_hw_create_lacp_rx_actions_template(dev, error);
9600 [ # # ]: 0 : if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) {
9601 : 0 : DRV_LOG(ERR, "port %u failed to create actions template"
9602 : : " for LACP Rx traffic", dev->data->port_id);
9603 : 0 : goto err;
9604 : : }
9605 : 0 : hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table
9606 : : (dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
9607 : : hw_ctrl_fdb->lacp_rx_actions_tmpl, error);
9608 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_lacp_rx_tbl) {
9609 : 0 : DRV_LOG(ERR, "port %u failed to create template table"
9610 : : " for LACP Rx traffic", dev->data->port_id);
9611 : 0 : goto err;
9612 : : }
9613 : : }
9614 : : return 0;
9615 : :
9616 : 0 : err:
9617 : 0 : flow_hw_cleanup_ctrl_fdb_tables(dev);
9618 : 0 : return -EINVAL;
9619 : : }
9620 : :
9621 : : static void
9622 : 0 : flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,
9623 : : struct mlx5_aso_ct_pools_mng *ct_mng)
9624 : : {
9625 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9626 : :
9627 : 0 : mlx5_aso_ct_queue_uninit(priv->sh, ct_mng);
9628 : 0 : mlx5_free(ct_mng);
9629 : 0 : }
9630 : :
9631 : : static void
9632 : 0 : flow_hw_ct_pool_destroy(struct rte_eth_dev *dev,
9633 : : struct mlx5_aso_ct_pool *pool)
9634 : : {
9635 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9636 : :
9637 [ # # ]: 0 : if (pool->dr_action)
9638 : 0 : mlx5dr_action_destroy(pool->dr_action);
9639 [ # # ]: 0 : if (!priv->shared_host) {
9640 [ # # ]: 0 : if (pool->devx_obj)
9641 : 0 : claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
9642 [ # # ]: 0 : if (pool->cts)
9643 : 0 : mlx5_ipool_destroy(pool->cts);
9644 : : }
9645 : 0 : mlx5_free(pool);
9646 : 0 : }
9647 : :
9648 : : static struct mlx5_aso_ct_pool *
9649 : 0 : flow_hw_ct_pool_create(struct rte_eth_dev *dev,
9650 : : const struct rte_flow_port_attr *port_attr)
9651 : : {
9652 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9653 : : struct mlx5_aso_ct_pool *pool;
9654 : : struct mlx5_devx_obj *obj;
9655 [ # # ]: 0 : uint32_t nb_cts = rte_align32pow2(port_attr->nb_conn_tracks);
9656 : : uint32_t log_obj_size = rte_log2_u32(nb_cts);
9657 : 0 : struct mlx5_indexed_pool_config cfg = {
9658 : : .size = sizeof(struct mlx5_aso_ct_action),
9659 : : .trunk_size = 1 << 12,
9660 : : .per_core_cache = 1 << 13,
9661 : : .need_lock = 1,
9662 : 0 : .release_mem_en = !!priv->sh->config.reclaim_mode,
9663 : : .malloc = mlx5_malloc,
9664 : : .free = mlx5_free,
9665 : : .type = "mlx5_hw_ct_action",
9666 : : };
9667 : : int reg_id;
9668 : : uint32_t flags = 0;
9669 : :
9670 : 0 : pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
9671 [ # # ]: 0 : if (!pool) {
9672 : 0 : rte_errno = ENOMEM;
9673 : 0 : return NULL;
9674 : : }
9675 [ # # ]: 0 : if (!priv->shared_host) {
9676 : : /*
9677 : : * No need for a local cache if the CT number is small, since the
9678 : : * flow insertion rate will be very limited in that case. Here the
9679 : : * trunk size is set to the CT number when it is below the default 4K.
9680 : : */
9681 [ # # ]: 0 : if (nb_cts <= cfg.trunk_size) {
9682 : 0 : cfg.per_core_cache = 0;
9683 : 0 : cfg.trunk_size = nb_cts;
9684 [ # # ]: 0 : } else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
9685 : 0 : cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
9686 : : }
9687 : 0 : cfg.max_idx = nb_cts;
9688 : 0 : pool->cts = mlx5_ipool_create(&cfg);
9689 [ # # ]: 0 : if (!pool->cts)
9690 : 0 : goto err;
9691 : 0 : obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
9692 : 0 : priv->sh->cdev->pdn,
9693 : : log_obj_size);
9694 [ # # ]: 0 : if (!obj) {
9695 : 0 : rte_errno = ENODATA;
9696 : 0 : DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
9697 : 0 : goto err;
9698 : : }
9699 : 0 : pool->devx_obj = obj;
9700 : : } else {
9701 : : struct rte_eth_dev *host_dev = priv->shared_host;
9702 : 0 : struct mlx5_priv *host_priv = host_dev->data->dev_private;
9703 : :
9704 : 0 : pool->devx_obj = host_priv->hws_ctpool->devx_obj;
9705 : 0 : pool->cts = host_priv->hws_ctpool->cts;
9706 : : MLX5_ASSERT(pool->cts);
9707 : : MLX5_ASSERT(!port_attr->nb_conn_tracks);
9708 : : }
9709 : 0 : reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
9710 : : flags |= MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
9711 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en && priv->master)
9712 : : flags |= MLX5DR_ACTION_FLAG_HWS_FDB;
9713 : 0 : pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
9714 : 0 : (struct mlx5dr_devx_obj *)pool->devx_obj,
9715 : 0 : reg_id - REG_C_0, flags);
9716 [ # # ]: 0 : if (!pool->dr_action)
9717 : 0 : goto err;
9718 : 0 : pool->sq = priv->ct_mng->aso_sqs;
9719 : : /* Assign the last extra ASO SQ as public SQ. */
9720 : 0 : pool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];
9721 : 0 : return pool;
9722 : 0 : err:
9723 : 0 : flow_hw_ct_pool_destroy(dev, pool);
9724 : 0 : return NULL;
9725 : : }
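 : : /*
 : :  * Sizing sketch (the numbers are hypothetical): with
 : :  * port_attr->nb_conn_tracks == 1000, rte_align32pow2() rounds up to
 : :  * nb_cts == 1024 and rte_log2_u32(1024) == 10 becomes the DevX object
 : :  * log size. Since 1024 does not exceed the default trunk size (4096),
 : :  * the per-core cache is disabled and the ipool trunk shrinks to
 : :  * exactly 1024 entries.
 : :  */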
9726 : :
9727 : : static void
9728 : 0 : flow_hw_destroy_vlan(struct rte_eth_dev *dev)
9729 : : {
9730 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9731 : : enum mlx5dr_table_type i;
9732 : :
9733 [ # # ]: 0 : for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
9734 [ # # ]: 0 : if (priv->hw_pop_vlan[i]) {
9735 : 0 : mlx5dr_action_destroy(priv->hw_pop_vlan[i]);
9736 : 0 : priv->hw_pop_vlan[i] = NULL;
9737 : : }
9738 [ # # ]: 0 : if (priv->hw_push_vlan[i]) {
9739 : 0 : mlx5dr_action_destroy(priv->hw_push_vlan[i]);
9740 : 0 : priv->hw_push_vlan[i] = NULL;
9741 : : }
9742 : : }
9743 : 0 : }
9744 : :
9745 : : static int
9746 : 0 : flow_hw_create_vlan(struct rte_eth_dev *dev)
9747 : : {
9748 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9749 : : enum mlx5dr_table_type i;
9750 : 0 : const enum mlx5dr_action_flags flags[MLX5DR_TABLE_TYPE_MAX] = {
9751 : : MLX5DR_ACTION_FLAG_HWS_RX,
9752 : : MLX5DR_ACTION_FLAG_HWS_TX,
9753 : : MLX5DR_ACTION_FLAG_HWS_FDB
9754 : : };
9755 : :
9756 : : /* rte_errno is set in the mlx5dr_action* functions. */
9757 [ # # ]: 0 : for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++) {
9758 : 0 : priv->hw_pop_vlan[i] =
9759 : 0 : mlx5dr_action_create_pop_vlan(priv->dr_ctx, flags[i]);
9760 [ # # ]: 0 : if (!priv->hw_pop_vlan[i])
9761 : 0 : return -rte_errno;
9762 : 0 : priv->hw_push_vlan[i] =
9763 : 0 : mlx5dr_action_create_push_vlan(priv->dr_ctx, flags[i]);
9764 [ # # ]: 0 : if (!priv->hw_push_vlan[i])
9765 : 0 : return -rte_errno;
9766 : : }
9767 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en && priv->master) {
9768 : 0 : priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB] =
9769 : 0 : mlx5dr_action_create_pop_vlan
9770 : : (priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
9771 [ # # ]: 0 : if (!priv->hw_pop_vlan[MLX5DR_TABLE_TYPE_FDB])
9772 : 0 : return -rte_errno;
9773 : 0 : priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB] =
9774 : 0 : mlx5dr_action_create_push_vlan
9775 : : (priv->dr_ctx, MLX5DR_ACTION_FLAG_HWS_FDB);
9776 [ # # ]: 0 : if (!priv->hw_push_vlan[MLX5DR_TABLE_TYPE_FDB])
9777 : 0 : return -rte_errno;
9778 : : }
9779 : : return 0;
9780 : : }
9781 : :
9782 : : static void
9783 : 0 : flow_hw_cleanup_ctrl_rx_tables(struct rte_eth_dev *dev)
9784 : : {
9785 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9786 : : unsigned int i;
9787 : : unsigned int j;
9788 : :
9789 [ # # ]: 0 : if (!priv->hw_ctrl_rx)
9790 : : return;
9791 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
9792 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
9793 : 0 : struct rte_flow_template_table *tbl = priv->hw_ctrl_rx->tables[i][j].tbl;
9794 : 0 : struct rte_flow_pattern_template *pt = priv->hw_ctrl_rx->tables[i][j].pt;
9795 : :
9796 [ # # ]: 0 : if (tbl)
9797 : 0 : claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
9798 [ # # ]: 0 : if (pt)
9799 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, pt, NULL));
9800 : : }
9801 : : }
9802 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++i) {
9803 : 0 : struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[i];
9804 : :
9805 [ # # ]: 0 : if (at)
9806 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
9807 : : }
9808 : 0 : mlx5_free(priv->hw_ctrl_rx);
9809 : 0 : priv->hw_ctrl_rx = NULL;
9810 : : }
9811 : :
9812 : : static uint64_t
9813 : : flow_hw_ctrl_rx_rss_type_hash_types(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9814 : : {
9815 : : switch (rss_type) {
9816 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP:
9817 : : return 0;
9818 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
9819 : : return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
9820 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
9821 : : return RTE_ETH_RSS_NONFRAG_IPV4_UDP;
9822 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
9823 : : return RTE_ETH_RSS_NONFRAG_IPV4_TCP;
9824 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
9825 : : return RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
9826 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
9827 : : return RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX;
9828 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
9829 : : return RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX;
9830 : : default:
9831 : : /* Should not reach here. */
9832 : : MLX5_ASSERT(false);
9833 : : return 0;
9834 : : }
9835 : : }
9836 : :
9837 : : static struct rte_flow_actions_template *
9838 : 0 : flow_hw_create_ctrl_rx_rss_template(struct rte_eth_dev *dev,
9839 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9840 : : {
9841 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9842 : 0 : struct rte_flow_actions_template_attr attr = {
9843 : : .ingress = 1,
9844 : : };
9845 : : uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
9846 : 0 : struct rte_flow_action_rss rss_conf = {
9847 : : .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
9848 : : .level = 0,
9849 : : .types = 0,
9850 : 0 : .key_len = priv->rss_conf.rss_key_len,
9851 : 0 : .key = priv->rss_conf.rss_key,
9852 : 0 : .queue_num = priv->reta_idx_n,
9853 : : .queue = queue,
9854 : : };
9855 : 0 : struct rte_flow_action actions[] = {
9856 : : {
9857 : : .type = RTE_FLOW_ACTION_TYPE_RSS,
9858 : : .conf = &rss_conf,
9859 : : },
9860 : : {
9861 : : .type = RTE_FLOW_ACTION_TYPE_END,
9862 : : }
9863 : : };
9864 [ # # ]: 0 : struct rte_flow_action masks[] = {
9865 : : {
9866 : : .type = RTE_FLOW_ACTION_TYPE_RSS,
9867 : : .conf = &rss_conf,
9868 : : },
9869 : : {
9870 : : .type = RTE_FLOW_ACTION_TYPE_END,
9871 : : }
9872 : : };
9873 : : struct rte_flow_actions_template *at;
9874 : : struct rte_flow_error error;
9875 : : unsigned int i;
9876 : :
9877 : : MLX5_ASSERT(priv->reta_idx_n > 0 && priv->reta_idx);
9878 : : /* Select proper RSS hash types and based on that configure the actions template. */
9879 : 0 : rss_conf.types = flow_hw_ctrl_rx_rss_type_hash_types(rss_type);
9880 [ # # ]: 0 : if (rss_conf.types) {
9881 [ # # ]: 0 : for (i = 0; i < priv->reta_idx_n; ++i)
9882 : 0 : queue[i] = (*priv->reta_idx)[i];
9883 : : } else {
9884 : 0 : rss_conf.queue_num = 1;
9885 : 0 : queue[0] = (*priv->reta_idx)[0];
9886 : : }
9887 : 0 : at = flow_hw_actions_template_create(dev, &attr, actions, masks, &error);
9888 [ # # ]: 0 : if (!at)
9889 [ # # ]: 0 : DRV_LOG(ERR,
9890 : : "Failed to create ctrl flow actions template: rte_errno(%d), type(%d): %s",
9891 : : rte_errno, error.type,
9892 : : error.message ? error.message : "(no stated reason)");
9893 : 0 : return at;
9894 : : }
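 : : /*
 : :  * Note: for MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP the hash-type set
 : :  * is empty, so instead of an RSS fan-out the template directs traffic
 : :  * to a single queue, the first RETA entry.
 : :  */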
9895 : :
9896 : : static uint32_t ctrl_rx_rss_priority_map[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX] = {
9897 : : [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP] = MLX5_HW_CTRL_RX_PRIO_L2,
9898 : : [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4] = MLX5_HW_CTRL_RX_PRIO_L3,
9899 : : [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
9900 : : [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
9901 : : [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6] = MLX5_HW_CTRL_RX_PRIO_L3,
9902 : : [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
9903 : : [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
9904 : : };
9905 : :
9906 : : static uint32_t ctrl_rx_nb_flows_map[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] = {
9907 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL] = 1,
9908 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST] = 1,
9909 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST] = 1,
9910 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN] = MLX5_MAX_VLAN_IDS,
9911 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST] = 1,
9912 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
9913 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST] = 1,
9914 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
9915 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC] = MLX5_MAX_UC_MAC_ADDRESSES,
9916 : : [MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN] =
9917 : : MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
9918 : : };
9919 : :
9920 : : static struct rte_flow_template_table_attr
9921 : : flow_hw_get_ctrl_rx_table_attr(enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
9922 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9923 : : {
9924 : 0 : return (struct rte_flow_template_table_attr){
9925 : : .flow_attr = {
9926 : : .group = 0,
9927 : 0 : .priority = ctrl_rx_rss_priority_map[rss_type],
9928 : : .ingress = 1,
9929 : : },
9930 : 0 : .nb_flows = ctrl_rx_nb_flows_map[eth_pattern_type],
9931 : : };
9932 : : }
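 : : /*
 : :  * For example, a DMAC_VLAN pattern table is sized to
 : :  * MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS flows, i.e. one control
 : :  * rule per (unicast MAC, VLAN ID) pair, whereas promiscuous-style
 : :  * patterns need only a single rule.
 : :  */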
9933 : :
9934 : : static struct rte_flow_item
9935 : : flow_hw_get_ctrl_rx_eth_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
9936 : : {
9937 : : struct rte_flow_item item = {
9938 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
9939 : : .mask = NULL,
9940 : : };
9941 : :
9942 : 0 : switch (eth_pattern_type) {
9943 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
9944 : : item.mask = &ctrl_rx_eth_promisc_mask;
9945 : : break;
9946 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
9947 : : item.mask = &ctrl_rx_eth_mcast_mask;
9948 : 0 : break;
9949 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
9950 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
9951 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
9952 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
9953 : : item.mask = &ctrl_rx_eth_dmac_mask;
9954 : 0 : break;
9955 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
9956 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
9957 : : item.mask = &ctrl_rx_eth_ipv4_mcast_mask;
9958 : 0 : break;
9959 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
9960 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
9961 : : item.mask = &ctrl_rx_eth_ipv6_mcast_mask;
9962 : 0 : break;
9963 : 0 : default:
9964 : : /* Should not reach here - ETH mask must be present. */
9965 : : item.type = RTE_FLOW_ITEM_TYPE_END;
9966 : : MLX5_ASSERT(false);
9967 : 0 : break;
9968 : : }
9969 : 0 : return item;
9970 : : }
9971 : :
9972 : : static struct rte_flow_item
9973 : : flow_hw_get_ctrl_rx_vlan_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
9974 : : {
9975 : : struct rte_flow_item item = {
9976 : : .type = RTE_FLOW_ITEM_TYPE_VOID,
9977 : : .mask = NULL,
9978 : : };
9979 : :
9980 [ # # ]: 0 : switch (eth_pattern_type) {
9981 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
9982 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
9983 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
9984 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
9985 : : item.type = RTE_FLOW_ITEM_TYPE_VLAN;
9986 : : item.mask = &rte_flow_item_vlan_mask;
9987 : 0 : break;
9988 : : default:
9989 : : /* Nothing to update. */
9990 : : break;
9991 : : }
9992 : 0 : return item;
9993 : : }
9994 : :
9995 : : static struct rte_flow_item
9996 : : flow_hw_get_ctrl_rx_l3_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
9997 : : {
9998 : : struct rte_flow_item item = {
9999 : : .type = RTE_FLOW_ITEM_TYPE_VOID,
10000 : : .mask = NULL,
10001 : : };
10002 : :
10003 [ # # # ]: 0 : switch (rss_type) {
10004 : 0 : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
10005 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
10006 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
10007 : : item.type = RTE_FLOW_ITEM_TYPE_IPV4;
10008 : 0 : break;
10009 : 0 : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
10010 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
10011 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
10012 : : item.type = RTE_FLOW_ITEM_TYPE_IPV6;
10013 : 0 : break;
10014 : : default:
10015 : : /* Nothing to update. */
10016 : : break;
10017 : : }
10018 : 0 : return item;
10019 : : }
10020 : :
10021 : : static struct rte_flow_item
10022 : : flow_hw_get_ctrl_rx_l4_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
10023 : : {
10024 : : struct rte_flow_item item = {
10025 : : .type = RTE_FLOW_ITEM_TYPE_VOID,
10026 : : .mask = NULL,
10027 : : };
10028 : :
10029 [ # # # ]: 0 : switch (rss_type) {
10030 : 0 : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
10031 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
10032 : : item.type = RTE_FLOW_ITEM_TYPE_UDP;
10033 : 0 : break;
10034 : 0 : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
10035 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
10036 : : item.type = RTE_FLOW_ITEM_TYPE_TCP;
10037 : 0 : break;
10038 : : default:
10039 : : /* Nothing to update. */
10040 : : break;
10041 : : }
10042 : 0 : return item;
10043 : : }
10044 : :
10045 : : static struct rte_flow_pattern_template *
10046 : 0 : flow_hw_create_ctrl_rx_pattern_template
10047 : : (struct rte_eth_dev *dev,
10048 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
10049 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
10050 : : {
10051 : 0 : const struct rte_flow_pattern_template_attr attr = {
10052 : : .relaxed_matching = 0,
10053 : : .ingress = 1,
10054 : : };
10055 [ # # # # : 0 : struct rte_flow_item items[] = {
# # ]
10056 : : /* Matching patterns */
10057 : : flow_hw_get_ctrl_rx_eth_item(eth_pattern_type),
10058 : : flow_hw_get_ctrl_rx_vlan_item(eth_pattern_type),
10059 : : flow_hw_get_ctrl_rx_l3_item(rss_type),
10060 : : flow_hw_get_ctrl_rx_l4_item(rss_type),
10061 : : /* Terminate pattern */
10062 : : { .type = RTE_FLOW_ITEM_TYPE_END }
10063 : : };
10064 : :
10065 : 0 : return flow_hw_pattern_template_create(dev, &attr, items, NULL);
10066 : : }
10067 : :
10068 : : static int
10069 : 0 : flow_hw_create_ctrl_rx_tables(struct rte_eth_dev *dev)
10070 : : {
10071 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10072 : : unsigned int i;
10073 : : unsigned int j;
10074 : : int ret;
10075 : :
10076 : : MLX5_ASSERT(!priv->hw_ctrl_rx);
10077 : 0 : priv->hw_ctrl_rx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*priv->hw_ctrl_rx),
10078 : 0 : RTE_CACHE_LINE_SIZE, rte_socket_id());
10079 [ # # ]: 0 : if (!priv->hw_ctrl_rx) {
10080 : 0 : DRV_LOG(ERR, "Failed to allocate memory for Rx control flow tables");
10081 : 0 : rte_errno = ENOMEM;
10082 : 0 : return -rte_errno;
10083 : : }
10084 : : /* Create all pattern template variants. */
10085 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
10086 : : enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
10087 : :
10088 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
10089 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
10090 : : struct rte_flow_template_table_attr attr;
10091 : : struct rte_flow_pattern_template *pt;
10092 : :
10093 : : attr = flow_hw_get_ctrl_rx_table_attr(eth_pattern_type, rss_type);
10094 : 0 : pt = flow_hw_create_ctrl_rx_pattern_template(dev, eth_pattern_type,
10095 : : rss_type);
10096 [ # # ]: 0 : if (!pt)
10097 : 0 : goto err;
10098 : 0 : priv->hw_ctrl_rx->tables[i][j].attr = attr;
10099 : 0 : priv->hw_ctrl_rx->tables[i][j].pt = pt;
10100 : : }
10101 : : }
10102 : : return 0;
10103 : : err:
10104 : 0 : ret = rte_errno;
10105 : 0 : flow_hw_cleanup_ctrl_rx_tables(dev);
10106 : 0 : rte_errno = ret;
10107 : 0 : return -ret;
10108 : : }
10109 : :
10110 : : void
10111 : 0 : mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev)
10112 : : {
10113 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10114 : : struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
10115 : : unsigned int i;
10116 : : unsigned int j;
10117 : :
10118 [ # # ]: 0 : if (!priv->dr_ctx)
10119 : : return;
10120 [ # # ]: 0 : if (!priv->hw_ctrl_rx)
10121 : : return;
10122 : : hw_ctrl_rx = priv->hw_ctrl_rx;
10123 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
10124 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
10125 : : struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
10126 : :
10127 [ # # ]: 0 : if (tmpls->tbl) {
10128 : 0 : claim_zero(flow_hw_table_destroy(dev, tmpls->tbl, NULL));
10129 : 0 : tmpls->tbl = NULL;
10130 : : }
10131 : : }
10132 : : }
10133 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
10134 [ # # ]: 0 : if (hw_ctrl_rx->rss[j]) {
10135 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_rx->rss[j], NULL));
10136 : 0 : hw_ctrl_rx->rss[j] = NULL;
10137 : : }
10138 : : }
10139 : : }
10140 : :
10141 : : /**
10142 : : * Copy the provided HWS configuration to a newly allocated buffer.
10143 : : *
10144 : : * @param[in] port_attr
10145 : : * Port configuration attributes.
10146 : : * @param[in] nb_queue
10147 : : * Number of queue.
10148 : : * @param[in] queue_attr
10149 : : * Array that holds attributes for each flow queue.
10150 : : *
10151 : : * @return
10152 : : * Pointer to copied HWS configuration is returned on success.
10153 : : * Otherwise, NULL is returned and rte_errno is set.
10154 : : */
10155 : : static struct mlx5_flow_hw_attr *
10156 : 0 : flow_hw_alloc_copy_config(const struct rte_flow_port_attr *port_attr,
10157 : : const uint16_t nb_queue,
10158 : : const struct rte_flow_queue_attr *queue_attr[],
10159 : : struct rte_flow_error *error)
10160 : : {
10161 : : struct mlx5_flow_hw_attr *hw_attr;
10162 : : size_t hw_attr_size;
10163 : : unsigned int i;
10164 : :
10165 : 0 : hw_attr_size = sizeof(*hw_attr) + nb_queue * sizeof(*hw_attr->queue_attr);
10166 : 0 : hw_attr = mlx5_malloc(MLX5_MEM_ZERO, hw_attr_size, 0, SOCKET_ID_ANY);
10167 [ # # ]: 0 : if (!hw_attr) {
10168 : 0 : rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10169 : : "Not enough memory to store configuration");
10170 : 0 : return NULL;
10171 : : }
10172 : 0 : memcpy(&hw_attr->port_attr, port_attr, sizeof(*port_attr));
10173 : 0 : hw_attr->nb_queue = nb_queue;
10174 : : /* Queue attributes are placed after the mlx5_flow_hw_attr. */
10175 : 0 : hw_attr->queue_attr = (struct rte_flow_queue_attr *)(hw_attr + 1);
10176 [ # # ]: 0 : for (i = 0; i < nb_queue; ++i)
10177 : 0 : memcpy(&hw_attr->queue_attr[i], queue_attr[i], sizeof(hw_attr->queue_attr[i]));
10178 : : return hw_attr;
10179 : : }
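 : : /*
 : :  * The single allocation above is laid out as follows (a sketch of the
 : :  * layout implied by the pointer arithmetic, not a real type):
 : :  *
 : :  * @code{.c}
 : :  * struct {
 : :  *     struct mlx5_flow_hw_attr hw_attr;
 : :  *     struct rte_flow_queue_attr queue_attr[nb_queue];
 : :  * };
 : :  * @endcode
 : :  *
 : :  * hw_attr->queue_attr points at the trailing array, i.e. (hw_attr + 1).
 : :  */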
10180 : :
10181 : : /**
10182 : : * Compares the preserved HWS configuration with the provided one.
10183 : : *
10184 : : * @param[in] hw_attr
10185 : : * Pointer to preserved HWS configuration.
10186 : : * @param[in] new_pa
10187 : : * Port configuration attributes to compare.
10188 : : * @param[in] new_nbq
10189 : : * Number of queues to compare.
10190 : : * @param[in] new_qa
10191 : : * Array that holds attributes for each flow queue.
10192 : : *
10193 : : * @return
10194 : : * True if configurations are the same, false otherwise.
10195 : : */
10196 : : static bool
10197 : 0 : flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
10198 : : const struct rte_flow_port_attr *new_pa,
10199 : : const uint16_t new_nbq,
10200 : : const struct rte_flow_queue_attr *new_qa[])
10201 : : {
10202 : : const struct rte_flow_port_attr *old_pa = &hw_attr->port_attr;
10203 : 0 : const uint16_t old_nbq = hw_attr->nb_queue;
10204 : 0 : const struct rte_flow_queue_attr *old_qa = hw_attr->queue_attr;
10205 : : unsigned int i;
10206 : :
10207 [ # # ]: 0 : if (old_pa->nb_counters != new_pa->nb_counters ||
10208 [ # # ]: 0 : old_pa->nb_aging_objects != new_pa->nb_aging_objects ||
10209 [ # # ]: 0 : old_pa->nb_meters != new_pa->nb_meters ||
10210 [ # # ]: 0 : old_pa->nb_conn_tracks != new_pa->nb_conn_tracks ||
10211 [ # # ]: 0 : old_pa->flags != new_pa->flags)
10212 : : return false;
10213 [ # # ]: 0 : if (old_nbq != new_nbq)
10214 : : return false;
10215 [ # # ]: 0 : for (i = 0; i < old_nbq; ++i)
10216 [ # # ]: 0 : if (old_qa[i].size != new_qa[i]->size)
10217 : : return false;
10218 : : return true;
10219 : : }
10220 : :
10221 : : /*
10222 : : * No need to explicitly release drop action templates on port stop.
10223 : :  * Drop action templates are released together with the other action templates during
10224 : : * mlx5_dev_close -> flow_hw_resource_release -> flow_hw_actions_template_destroy
10225 : : */
10226 : : static void
10227 : 0 : flow_hw_action_template_drop_release(struct rte_eth_dev *dev)
10228 : : {
10229 : : int i;
10230 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10231 : :
10232 [ # # ]: 0 : for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
10233 [ # # ]: 0 : if (!priv->action_template_drop[i])
10234 : 0 : continue;
10235 : 0 : flow_hw_actions_template_destroy(dev,
10236 : : priv->action_template_drop[i],
10237 : : NULL);
10238 : 0 : priv->action_template_drop[i] = NULL;
10239 : : }
10240 : 0 : }
10241 : :
10242 : : static int
10243 : 0 : flow_hw_action_template_drop_init(struct rte_eth_dev *dev,
10244 : : struct rte_flow_error *error)
10245 : : {
10246 : 0 : const struct rte_flow_action drop[2] = {
10247 : : [0] = { .type = RTE_FLOW_ACTION_TYPE_DROP },
10248 : : [1] = { .type = RTE_FLOW_ACTION_TYPE_END },
10249 : : };
10250 : : const struct rte_flow_action *actions = drop;
10251 : : const struct rte_flow_action *masks = drop;
10252 : 0 : const struct rte_flow_actions_template_attr attr[MLX5DR_TABLE_TYPE_MAX] = {
10253 : : [MLX5DR_TABLE_TYPE_NIC_RX] = { .ingress = 1 },
10254 : : [MLX5DR_TABLE_TYPE_NIC_TX] = { .egress = 1 },
10255 : : [MLX5DR_TABLE_TYPE_FDB] = { .transfer = 1 }
10256 : : };
10257 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10258 : :
10259 : 0 : priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX] =
10260 : 0 : flow_hw_actions_template_create(dev,
10261 : : &attr[MLX5DR_TABLE_TYPE_NIC_RX],
10262 : : actions, masks, error);
10263 [ # # ]: 0 : if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX])
10264 : : return -1;
10265 : 0 : priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX] =
10266 : 0 : flow_hw_actions_template_create(dev,
10267 : : &attr[MLX5DR_TABLE_TYPE_NIC_TX],
10268 : : actions, masks, error);
10269 [ # # ]: 0 : if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX])
10270 : : return -1;
10271 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en && priv->master) {
10272 : 0 : priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB] =
10273 : 0 : flow_hw_actions_template_create(dev,
10274 : : &attr[MLX5DR_TABLE_TYPE_FDB],
10275 : : actions, masks, error);
10276 [ # # ]: 0 : if (!priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB])
10277 : 0 : return -1;
10278 : : }
10279 : : return 0;
10280 : : }
10281 : :
10282 : : static __rte_always_inline struct rte_ring *
10283 : : mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char *str)
10284 : : {
10285 : : char mz_name[RTE_MEMZONE_NAMESIZE];
10286 : :
10287 : : snprintf(mz_name, sizeof(mz_name), "port_%u_%s_%u", port_id, str, queue);
10288 : 0 : return rte_ring_create(mz_name, size, SOCKET_ID_ANY,
10289 : : RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
10290 : : }
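/*
 * mlx5_hwq_ring_create() above builds single-producer/single-consumer rings
 * sized to the flow queue depth; RING_F_EXACT_SZ makes the usable capacity
 * equal the requested size instead of being rounded to a power of two. A
 * minimal sketch of how such a ring behaves (names are illustrative only):
 *
 * @code{.c}
 * struct rte_ring *r = rte_ring_create("example_ring", 64, SOCKET_ID_ANY,
 *                                      RING_F_SP_ENQ | RING_F_SC_DEQ |
 *                                      RING_F_EXACT_SZ);
 * int payload = 42;
 * void *out;
 *
 * if (r != NULL && rte_ring_enqueue(r, &payload) == 0) {
 *     (void)rte_ring_dequeue(r, &out); // returns the same pointer
 * }
 * rte_ring_free(r);
 * @endcode
 */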
10291 : :
10292 : : static int
10293 : 0 : flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr,
10294 : : uint16_t nb_queue,
10295 : : const struct rte_flow_queue_attr *queue_attr[],
10296 : : struct rte_flow_error *error)
10297 : : {
10298 : : uint32_t size;
10299 : : unsigned int i;
10300 : :
10301 [ # # ]: 0 : if (port_attr == NULL)
10302 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10303 : : "Port attributes must be non-NULL");
10304 : :
10305 [ # # ]: 0 : if (nb_queue == 0)
10306 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10307 : : "At least one flow queue is required");
10308 : :
10309 [ # # ]: 0 : if (queue_attr == NULL)
10310 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10311 : : "Queue attributes must be non-NULL");
10312 : :
10313 : 0 : size = queue_attr[0]->size;
10314 [ # # ]: 0 : for (i = 1; i < nb_queue; ++i) {
10315 [ # # ]: 0 : if (queue_attr[i]->size != size)
10316 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10317 : : NULL,
10318 : : "All flow queues must have the same size");
10319 : : }
10320 : :
10321 : : return 0;
10322 : : }
10323 : :
10324 : : /**
10325 : : * Configure port HWS resources.
10326 : : *
10327 : : * @param[in] dev
10328 : : * Pointer to the rte_eth_dev structure.
10329 : : * @param[in] port_attr
10330 : : * Port configuration attributes.
10331 : : * @param[in] nb_queue
10332 : :  *   Number of queues.
10333 : : * @param[in] queue_attr
10334 : : * Array that holds attributes for each flow queue.
10335 : : * @param[out] error
10336 : : * Pointer to error structure.
10337 : : *
10338 : : * @return
10339 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
10340 : : */
10341 : : static int
10342 : 0 : flow_hw_configure(struct rte_eth_dev *dev,
10343 : : const struct rte_flow_port_attr *port_attr,
10344 : : uint16_t nb_queue,
10345 : : const struct rte_flow_queue_attr *queue_attr[],
10346 : : struct rte_flow_error *error)
10347 : : {
10348 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10349 : : struct mlx5_priv *host_priv = NULL;
10350 : : struct mlx5dr_context *dr_ctx = NULL;
10351 : 0 : struct mlx5dr_context_attr dr_ctx_attr = {0};
10352 : : struct mlx5_hw_q *hw_q;
10353 : : struct mlx5_hw_q_job *job = NULL;
10354 : : uint32_t mem_size, i, j;
10355 : 0 : struct mlx5_indexed_pool_config cfg = {
10356 : : .size = sizeof(struct mlx5_action_construct_data),
10357 : : .trunk_size = 4096,
10358 : : .need_lock = 1,
10359 : 0 : .release_mem_en = !!priv->sh->config.reclaim_mode,
10360 : : .malloc = mlx5_malloc,
10361 : : .free = mlx5_free,
10362 : : .type = "mlx5_hw_action_construct_data",
10363 : : };
10364 : : /*
10365 : :  * Add one extra queue for internal PMD use.
10366 : :  * The last queue is reserved for the PMD.
10367 : : */
10368 : : uint16_t nb_q_updated = 0;
10369 : : struct rte_flow_queue_attr **_queue_attr = NULL;
10370 : 0 : struct rte_flow_queue_attr ctrl_queue_attr = {0};
10371 [ # # # # ]: 0 : bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
10372 : : int ret = 0;
10373 : : uint32_t action_flags;
10374 : :
10375 [ # # ]: 0 : if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error))
10376 : 0 : return -rte_errno;
10377 : : /*
10378 : : * Calling rte_flow_configure() again is allowed if and only if
10379 : : * provided configuration matches the initially provided one.
10380 : : */
10381 [ # # ]: 0 : if (priv->dr_ctx) {
10382 : : MLX5_ASSERT(priv->hw_attr != NULL);
10383 [ # # ]: 0 : for (i = 0; i < priv->nb_queue; i++) {
10384 : 0 : hw_q = &priv->hw_q[i];
10385 : : /* Make sure all queues are empty. */
10386 [ # # ]: 0 : if (hw_q->size != hw_q->job_idx) {
10387 : 0 : rte_errno = EBUSY;
10388 : 0 : goto err;
10389 : : }
10390 : : }
10391 [ # # ]: 0 : if (flow_hw_compare_config(priv->hw_attr, port_attr, nb_queue, queue_attr))
10392 : : return 0;
10393 : : else
10394 : 0 : return rte_flow_error_set(error, ENOTSUP,
10395 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10396 : : "Changing HWS configuration attributes "
10397 : : "is not supported");
10398 : : }
10399 : 0 : priv->hw_attr = flow_hw_alloc_copy_config(port_attr, nb_queue, queue_attr, error);
10400 [ # # ]: 0 : if (!priv->hw_attr) {
10401 : 0 : ret = -rte_errno;
10402 : 0 : goto err;
10403 : : }
10404 : 0 : ctrl_queue_attr.size = queue_attr[0]->size;
10405 : 0 : nb_q_updated = nb_queue + 1;
10406 : 0 : _queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
10407 : : nb_q_updated *
10408 : : sizeof(struct rte_flow_queue_attr *),
10409 : : 64, SOCKET_ID_ANY);
10410 [ # # ]: 0 : if (!_queue_attr) {
10411 : 0 : rte_errno = ENOMEM;
10412 : 0 : goto err;
10413 : : }
10414 : :
10415 : 0 : memcpy(_queue_attr, queue_attr, sizeof(void *) * nb_queue);
10416 : 0 : _queue_attr[nb_queue] = &ctrl_queue_attr;
10417 : 0 : priv->acts_ipool = mlx5_ipool_create(&cfg);
10418 [ # # ]: 0 : if (!priv->acts_ipool)
10419 : 0 : goto err;
10420 : : /* Allocate the queue job descriptor LIFO. */
10421 : 0 : mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
10422 [ # # ]: 0 : for (i = 0; i < nb_q_updated; i++) {
10423 : 0 : mem_size += (sizeof(struct mlx5_hw_q_job *) +
10424 : 0 : sizeof(struct mlx5_hw_q_job)) * _queue_attr[i]->size;
10425 : : }
10426 : 0 : priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
10427 : : 64, SOCKET_ID_ANY);
10428 [ # # ]: 0 : if (!priv->hw_q) {
10429 : 0 : rte_errno = ENOMEM;
10430 : 0 : goto err;
10431 : : }
10432 [ # # ]: 0 : for (i = 0; i < nb_q_updated; i++) {
10433 : 0 : priv->hw_q[i].job_idx = _queue_attr[i]->size;
10434 : 0 : priv->hw_q[i].size = _queue_attr[i]->size;
10435 : 0 : priv->hw_q[i].ongoing_flow_ops = 0;
10436 [ # # ]: 0 : if (i == 0)
10437 : 0 : priv->hw_q[i].job = (struct mlx5_hw_q_job **)
10438 : 0 : &priv->hw_q[nb_q_updated];
10439 : : else
10440 : 0 : priv->hw_q[i].job = (struct mlx5_hw_q_job **)&job[_queue_attr[i - 1]->size];
10441 : 0 : job = (struct mlx5_hw_q_job *)
10442 : 0 : &priv->hw_q[i].job[_queue_attr[i]->size];
10443 [ # # ]: 0 : for (j = 0; j < _queue_attr[i]->size; j++)
10444 : 0 : priv->hw_q[i].job[j] = &job[j];
10445 : : /* Notice ring name length is limited. */
10446 : 0 : priv->hw_q[i].indir_cq = mlx5_hwq_ring_create
10447 : 0 : (dev->data->port_id, i, _queue_attr[i]->size, "indir_act_cq");
10448 [ # # ]: 0 : if (!priv->hw_q[i].indir_cq)
10449 : 0 : goto err;
10450 : 0 : priv->hw_q[i].indir_iq = mlx5_hwq_ring_create
10451 : 0 : (dev->data->port_id, i, _queue_attr[i]->size, "indir_act_iq");
10452 [ # # ]: 0 : if (!priv->hw_q[i].indir_iq)
10453 : 0 : goto err;
10454 : 0 : priv->hw_q[i].flow_transfer_pending = mlx5_hwq_ring_create
10455 : 0 : (dev->data->port_id, i, _queue_attr[i]->size, "tx_pending");
10456 [ # # ]: 0 : if (!priv->hw_q[i].flow_transfer_pending)
10457 : 0 : goto err;
10458 : 0 : priv->hw_q[i].flow_transfer_completed = mlx5_hwq_ring_create
10459 : 0 : (dev->data->port_id, i, _queue_attr[i]->size, "tx_done");
10460 [ # # ]: 0 : if (!priv->hw_q[i].flow_transfer_completed)
10461 : 0 : goto err;
10462 : : }
10463 : 0 : dr_ctx_attr.pd = priv->sh->cdev->pd;
10464 : 0 : dr_ctx_attr.queues = nb_q_updated;
10465 : :  /* Queue sizes must all be the same. Take the first one. */
10466 : 0 : dr_ctx_attr.queue_size = _queue_attr[0]->size;
10467 [ # # ]: 0 : if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
10468 : : struct rte_eth_dev *host_dev = NULL;
10469 : : uint16_t port_id;
10470 : :
10471 : : MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
10472 [ # # ]: 0 : if (is_proxy) {
10473 : 0 : DRV_LOG(ERR, "cross vHCA shared mode not supported "
10474 : :  "for E-Switch configurations");
10475 : 0 : rte_errno = ENOTSUP;
10476 : 0 : goto err;
10477 : : }
10478 [ # # ]: 0 : MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
10479 [ # # ]: 0 : if (port_id == port_attr->host_port_id) {
10480 : 0 : host_dev = &rte_eth_devices[port_id];
10481 : 0 : break;
10482 : : }
10483 : : }
10484 [ # # ]: 0 : if (!host_dev || host_dev == dev ||
10485 [ # # # # ]: 0 : !host_dev->data || !host_dev->data->dev_private) {
10486 : 0 : DRV_LOG(ERR, "Invalid cross vHCA host port %u",
10487 : : port_attr->host_port_id);
10488 : 0 : rte_errno = EINVAL;
10489 : 0 : goto err;
10490 : : }
10491 : : host_priv = host_dev->data->dev_private;
10492 [ # # ]: 0 : if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
10493 : 0 : DRV_LOG(ERR, "Sibling ports %u and %u do not "
10494 : : "require cross vHCA sharing mode",
10495 : : dev->data->port_id, port_attr->host_port_id);
10496 : 0 : rte_errno = EINVAL;
10497 : 0 : goto err;
10498 : : }
10499 [ # # ]: 0 : if (host_priv->shared_host) {
10500 : 0 : DRV_LOG(ERR, "Host port %u is not the sharing base",
10501 : : port_attr->host_port_id);
10502 : 0 : rte_errno = EINVAL;
10503 : 0 : goto err;
10504 : : }
10505 [ # # ]: 0 : if (port_attr->nb_counters ||
10506 [ # # ]: 0 : port_attr->nb_aging_objects ||
10507 [ # # ]: 0 : port_attr->nb_meters ||
10508 [ # # ]: 0 : port_attr->nb_conn_tracks) {
10509 : 0 : DRV_LOG(ERR,
10510 : :  "Object numbers on a guest port must be zero");
10511 : 0 : rte_errno = EINVAL;
10512 : 0 : goto err;
10513 : : }
10514 : 0 : dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
10515 : 0 : priv->shared_host = host_dev;
10516 : 0 : __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
10517 : : }
10518 : 0 : dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
10519 : : /* rte_errno has been updated by HWS layer. */
10520 [ # # ]: 0 : if (!dr_ctx)
10521 : 0 : goto err;
10522 : 0 : priv->dr_ctx = dr_ctx;
10523 : 0 : priv->nb_queue = nb_q_updated;
10524 : : rte_spinlock_init(&priv->hw_ctrl_lock);
10525 : 0 : LIST_INIT(&priv->hw_ctrl_flows);
10526 : 0 : LIST_INIT(&priv->hw_ext_ctrl_flows);
10527 : 0 : ret = flow_hw_action_template_drop_init(dev, error);
10528 [ # # ]: 0 : if (ret)
10529 : 0 : goto err;
10530 : 0 : ret = flow_hw_create_ctrl_rx_tables(dev);
10531 [ # # ]: 0 : if (ret) {
10532 : 0 : rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10533 : : "Failed to set up Rx control flow templates");
10534 : 0 : goto err;
10535 : : }
10536 : : /* Initialize quotas */
10537 [ # # # # : 0 : if (port_attr->nb_quotas || (host_priv && host_priv->quota_ctx.devx_obj)) {
# # ]
10538 : 0 : ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas);
10539 [ # # ]: 0 : if (ret) {
10540 : 0 : rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10541 : : "Failed to initialize quota.");
10542 : 0 : goto err;
10543 : : }
10544 : : }
10545 : :  /* Initialize meter library. */
10546 [ # # # # : 0 : if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
# # ]
10547 [ # # ]: 0 : if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 0, 0, nb_q_updated))
10548 : 0 : goto err;
10549 : : /* Add global actions. */
10550 [ # # ]: 0 : for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
10551 : : uint32_t act_flags = 0;
10552 : :
10553 : 0 : act_flags = mlx5_hw_act_flag[i][0] | mlx5_hw_act_flag[i][1];
10554 [ # # ]: 0 : if (is_proxy)
10555 : 0 : act_flags |= mlx5_hw_act_flag[i][2];
10556 : 0 : priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
10557 [ # # ]: 0 : if (!priv->hw_drop[i])
10558 : 0 : goto err;
10559 : 0 : priv->hw_tag[i] = mlx5dr_action_create_tag
10560 : : (priv->dr_ctx, mlx5_hw_act_flag[i][0]);
10561 [ # # ]: 0 : if (!priv->hw_tag[i])
10562 : 0 : goto err;
10563 : : }
10564 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) {
10565 : 0 : ret = flow_hw_setup_tx_repr_tagging(dev, error);
10566 [ # # ]: 0 : if (ret)
10567 : 0 : goto err;
10568 : : }
10569 : : /*
10570 : :  * The DEFAULT_MISS action has different behaviors in different domains.
10571 : :  * In FDB, it steers packets to the E-switch manager.
10572 : :  * In the NIC Rx root table, it steers packets to the kernel driver stack.
10573 : :  * An action with all flag bits set can be created once and the HWS
10574 : :  * layer will translate it properly when it is used in different rules.
10575 : : */
10576 : : action_flags = MLX5DR_ACTION_FLAG_ROOT_RX | MLX5DR_ACTION_FLAG_HWS_RX |
10577 : : MLX5DR_ACTION_FLAG_ROOT_TX | MLX5DR_ACTION_FLAG_HWS_TX;
10578 [ # # ]: 0 : if (is_proxy)
10579 : : action_flags |= (MLX5DR_ACTION_FLAG_ROOT_FDB | MLX5DR_ACTION_FLAG_HWS_FDB);
10580 : 0 : priv->hw_def_miss = mlx5dr_action_create_default_miss(priv->dr_ctx, action_flags);
10581 [ # # ]: 0 : if (!priv->hw_def_miss)
10582 : 0 : goto err;
10583 [ # # ]: 0 : if (is_proxy) {
10584 : 0 : ret = flow_hw_create_vport_actions(priv);
10585 [ # # ]: 0 : if (ret) {
10586 : 0 : rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10587 : : NULL, "Failed to create vport actions.");
10588 : 0 : goto err;
10589 : : }
10590 : 0 : ret = flow_hw_create_ctrl_tables(dev, error);
10591 [ # # ]: 0 : if (ret)
10592 : 0 : goto err;
10593 : : }
10594 : 0 : if (!priv->shared_host)
10595 : : flow_hw_create_send_to_kernel_actions(priv);
10596 [ # # # # : 0 : if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
# # ]
10597 [ # # ]: 0 : if (!priv->shared_host) {
10598 : 0 : mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
10599 : : sizeof(*priv->ct_mng);
10600 : 0 : priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
10601 : : RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
10602 [ # # ]: 0 : if (!priv->ct_mng)
10603 : 0 : goto err;
10604 [ # # ]: 0 : if (mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng, nb_q_updated))
10605 : 0 : goto err;
10606 : : }
10607 : 0 : priv->hws_ctpool = flow_hw_ct_pool_create(dev, port_attr);
10608 [ # # ]: 0 : if (!priv->hws_ctpool)
10609 : 0 : goto err;
10610 : 0 : priv->sh->ct_aso_en = 1;
10611 : : }
10612 [ # # # # : 0 : if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
# # ]
10613 : 0 : priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
10614 : : nb_queue);
10615 [ # # ]: 0 : if (priv->hws_cpool == NULL)
10616 : 0 : goto err;
10617 : : }
10618 [ # # ]: 0 : if (port_attr->nb_aging_objects) {
10619 [ # # ]: 0 : if (port_attr->nb_counters == 0) {
10620 : : /*
10621 : :  * Aging management relies on counters. The number of
10622 : :  * requested counters must account for one counter for
10623 : :  * each flow rule containing AGE without a COUNT action.
10624 : : */
10625 : 0 : DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
10626 : :  "but no counters are requested.",
10627 : : dev->data->port_id,
10628 : : port_attr->nb_aging_objects);
10629 : 0 : rte_errno = EINVAL;
10630 : 0 : goto err;
10631 : : }
10632 : 0 : ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue);
10633 [ # # ]: 0 : if (ret < 0) {
10634 : 0 : rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10635 : : NULL, "Failed to init age pool.");
10636 : 0 : goto err;
10637 : : }
10638 : : }
10639 : 0 : ret = flow_hw_create_vlan(dev);
10640 [ # # ]: 0 : if (ret) {
10641 : 0 : rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10642 : :  NULL, "Failed to create VLAN actions.");
10643 : 0 : goto err;
10644 : : }
10645 [ # # ]: 0 : if (flow_hw_create_nat64_actions(priv, error))
10646 : 0 : DRV_LOG(WARNING, "Cannot create NAT64 action on port %u, "
10647 : : "please check the FW version", dev->data->port_id);
10648 : : if (_queue_attr)
10649 : 0 : mlx5_free(_queue_attr);
10650 [ # # ]: 0 : if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
10651 : 0 : priv->hws_strict_queue = 1;
10652 : 0 : dev->flow_fp_ops = &mlx5_flow_hw_fp_ops;
10653 : 0 : return 0;
10654 : 0 : err:
10655 : 0 : priv->hws_strict_queue = 0;
10656 : 0 : flow_hw_destroy_nat64_actions(priv);
10657 : 0 : flow_hw_destroy_vlan(dev);
10658 [ # # ]: 0 : if (priv->hws_age_req)
10659 : 0 : mlx5_hws_age_pool_destroy(priv);
10660 [ # # ]: 0 : if (priv->hws_cpool) {
10661 : 0 : mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
10662 : 0 : priv->hws_cpool = NULL;
10663 : : }
10664 [ # # ]: 0 : if (priv->hws_ctpool) {
10665 : 0 : flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
10666 : 0 : priv->hws_ctpool = NULL;
10667 : : }
10668 [ # # ]: 0 : if (priv->ct_mng) {
10669 : 0 : flow_hw_ct_mng_destroy(dev, priv->ct_mng);
10670 : 0 : priv->ct_mng = NULL;
10671 : : }
10672 : 0 : flow_hw_destroy_send_to_kernel_action(priv);
10673 : 0 : flow_hw_cleanup_ctrl_fdb_tables(dev);
10674 : 0 : flow_hw_free_vport_actions(priv);
10675 [ # # ]: 0 : if (priv->hw_def_miss) {
10676 : 0 : mlx5dr_action_destroy(priv->hw_def_miss);
10677 : 0 : priv->hw_def_miss = NULL;
10678 : : }
10679 : 0 : flow_hw_cleanup_tx_repr_tagging(dev);
10680 [ # # ]: 0 : for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
10681 [ # # ]: 0 : if (priv->hw_drop[i]) {
10682 : 0 : mlx5dr_action_destroy(priv->hw_drop[i]);
10683 : 0 : priv->hw_drop[i] = NULL;
10684 : : }
10685 [ # # ]: 0 : if (priv->hw_tag[i]) {
10686 : 0 : mlx5dr_action_destroy(priv->hw_tag[i]);
10687 : 0 : priv->hw_tag[i] = NULL;
10688 : : }
10689 : : }
10690 : 0 : mlx5_flow_meter_uninit(dev);
10691 : 0 : mlx5_flow_quota_destroy(dev);
10692 : 0 : flow_hw_cleanup_ctrl_rx_tables(dev);
10693 : 0 : flow_hw_action_template_drop_release(dev);
10694 [ # # ]: 0 : if (dr_ctx) {
10695 : 0 : claim_zero(mlx5dr_context_close(dr_ctx));
10696 : 0 : priv->dr_ctx = NULL;
10697 : : }
10698 [ # # ]: 0 : if (priv->shared_host) {
10699 : 0 : struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
10700 : :
10701 : 0 : __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
10702 : 0 : priv->shared_host = NULL;
10703 : : }
10704 [ # # ]: 0 : if (priv->hw_q) {
10705 [ # # ]: 0 : for (i = 0; i < nb_q_updated; i++) {
10706 : 0 : rte_ring_free(priv->hw_q[i].indir_iq);
10707 : 0 : rte_ring_free(priv->hw_q[i].indir_cq);
10708 : 0 : rte_ring_free(priv->hw_q[i].flow_transfer_pending);
10709 : 0 : rte_ring_free(priv->hw_q[i].flow_transfer_completed);
10710 : : }
10711 : 0 : mlx5_free(priv->hw_q);
10712 : 0 : priv->hw_q = NULL;
10713 : : }
10714 [ # # ]: 0 : if (priv->acts_ipool) {
10715 : 0 : mlx5_ipool_destroy(priv->acts_ipool);
10716 : 0 : priv->acts_ipool = NULL;
10717 : : }
10718 : 0 : mlx5_free(priv->hw_attr);
10719 : 0 : priv->hw_attr = NULL;
10720 : 0 : priv->nb_queue = 0;
10721 [ # # ]: 0 : if (_queue_attr)
10722 : 0 : mlx5_free(_queue_attr);
10723 : : /* Do not overwrite the internal errno information. */
10724 [ # # ]: 0 : if (ret)
10725 : : return ret;
10726 : 0 : return rte_flow_error_set(error, rte_errno,
10727 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10728 : :  "failed to configure port");
10729 : : }
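/*
 * flow_hw_configure() implements rte_flow_configure() for this PMD. A
 * minimal application-side sketch; port_id and the sizing values below are
 * placeholders. Note that, per the logic above, calling rte_flow_configure()
 * a second time succeeds only when the configuration is identical to the
 * first call:
 *
 * @code{.c}
 * const struct rte_flow_port_attr port_attr = {
 *     .nb_counters = 1024,
 *     .nb_aging_objects = 1024,
 * };
 * const struct rte_flow_queue_attr qattr = { .size = 256 };
 * const struct rte_flow_queue_attr *queue_attr[] = { &qattr, &qattr };
 * struct rte_flow_error error;
 *
 * if (rte_flow_configure(port_id, &port_attr, RTE_DIM(queue_attr),
 *                        queue_attr, &error) < 0)
 *     rte_exit(EXIT_FAILURE, "rte_flow_configure: %s\n", error.message);
 * @endcode
 */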
10730 : :
10731 : : /**
10732 : : * Release HWS resources.
10733 : : *
10734 : : * @param[in] dev
10735 : : * Pointer to the rte_eth_dev structure.
10736 : : */
10737 : : void
10738 : 0 : flow_hw_resource_release(struct rte_eth_dev *dev)
10739 : : {
10740 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10741 : : struct rte_flow_template_table *tbl;
10742 : : struct rte_flow_pattern_template *it;
10743 : : struct rte_flow_actions_template *at;
10744 : : struct mlx5_flow_group *grp;
10745 : : uint32_t i;
10746 : :
10747 [ # # ]: 0 : if (!priv->dr_ctx)
10748 : : return;
10749 : 0 : dev->flow_fp_ops = &rte_flow_fp_default_ops;
10750 : 0 : flow_hw_rxq_flag_set(dev, false);
10751 : 0 : flow_hw_flush_all_ctrl_flows(dev);
10752 : 0 : flow_hw_cleanup_ctrl_fdb_tables(dev);
10753 : 0 : flow_hw_cleanup_tx_repr_tagging(dev);
10754 : 0 : flow_hw_cleanup_ctrl_rx_tables(dev);
10755 : 0 : flow_hw_action_template_drop_release(dev);
10756 [ # # ]: 0 : while (!LIST_EMPTY(&priv->flow_hw_grp)) {
10757 : : grp = LIST_FIRST(&priv->flow_hw_grp);
10758 : 0 : flow_hw_group_unset_miss_group(dev, grp, NULL);
10759 : : }
10760 [ # # ]: 0 : while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) {
10761 : : tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
10762 : 0 : flow_hw_table_destroy(dev, tbl, NULL);
10763 : : }
10764 [ # # ]: 0 : while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
10765 : : tbl = LIST_FIRST(&priv->flow_hw_tbl);
10766 : 0 : flow_hw_table_destroy(dev, tbl, NULL);
10767 : : }
10768 [ # # ]: 0 : while (!LIST_EMPTY(&priv->flow_hw_itt)) {
10769 : : it = LIST_FIRST(&priv->flow_hw_itt);
10770 : 0 : flow_hw_pattern_template_destroy(dev, it, NULL);
10771 : : }
10772 [ # # ]: 0 : while (!LIST_EMPTY(&priv->flow_hw_at)) {
10773 : : at = LIST_FIRST(&priv->flow_hw_at);
10774 : 0 : flow_hw_actions_template_destroy(dev, at, NULL);
10775 : : }
10776 [ # # ]: 0 : for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
10777 [ # # ]: 0 : if (priv->hw_drop[i])
10778 : 0 : mlx5dr_action_destroy(priv->hw_drop[i]);
10779 [ # # ]: 0 : if (priv->hw_tag[i])
10780 : 0 : mlx5dr_action_destroy(priv->hw_tag[i]);
10781 : : }
10782 [ # # ]: 0 : if (priv->hw_def_miss)
10783 : 0 : mlx5dr_action_destroy(priv->hw_def_miss);
10784 : 0 : flow_hw_destroy_nat64_actions(priv);
10785 : 0 : flow_hw_destroy_vlan(dev);
10786 : 0 : flow_hw_destroy_send_to_kernel_action(priv);
10787 : 0 : flow_hw_free_vport_actions(priv);
10788 [ # # ]: 0 : if (priv->acts_ipool) {
10789 : 0 : mlx5_ipool_destroy(priv->acts_ipool);
10790 : 0 : priv->acts_ipool = NULL;
10791 : : }
10792 [ # # ]: 0 : if (priv->hws_age_req)
10793 : 0 : mlx5_hws_age_pool_destroy(priv);
10794 [ # # ]: 0 : if (priv->hws_cpool) {
10795 : 0 : mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
10796 : 0 : priv->hws_cpool = NULL;
10797 : : }
10798 [ # # ]: 0 : if (priv->hws_ctpool) {
10799 : 0 : flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
10800 : 0 : priv->hws_ctpool = NULL;
10801 : : }
10802 [ # # ]: 0 : if (priv->ct_mng) {
10803 : 0 : flow_hw_ct_mng_destroy(dev, priv->ct_mng);
10804 : 0 : priv->ct_mng = NULL;
10805 : : }
10806 : 0 : mlx5_flow_quota_destroy(dev);
10807 [ # # ]: 0 : for (i = 0; i < priv->nb_queue; i++) {
10808 : 0 : rte_ring_free(priv->hw_q[i].indir_iq);
10809 : 0 : rte_ring_free(priv->hw_q[i].indir_cq);
10810 : 0 : rte_ring_free(priv->hw_q[i].flow_transfer_pending);
10811 : 0 : rte_ring_free(priv->hw_q[i].flow_transfer_completed);
10812 : : }
10813 : 0 : mlx5_free(priv->hw_q);
10814 : 0 : priv->hw_q = NULL;
10815 [ # # ]: 0 : if (priv->shared_host) {
10816 : 0 : struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
10817 : 0 : __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
10818 : 0 : priv->shared_host = NULL;
10819 : : }
10820 : 0 : mlx5_free(priv->hw_attr);
10821 : 0 : priv->hw_attr = NULL;
10822 : 0 : priv->nb_queue = 0;
10823 : : }
10824 : :
10825 : :  /* Set the vport tag and mask used in HWS rules for the given port. */
10826 : : void
10827 : 0 : flow_hw_set_port_info(struct rte_eth_dev *dev)
10828 : : {
10829 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10830 : 0 : uint16_t port_id = dev->data->port_id;
10831 : : struct flow_hw_port_info *info;
10832 : :
10833 : : MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
10834 : 0 : info = &mlx5_flow_hw_port_infos[port_id];
10835 : 0 : info->regc_mask = priv->vport_meta_mask;
10836 [ # # ]: 0 : info->regc_value = priv->vport_meta_tag;
10837 [ # # ]: 0 : info->is_wire = mlx5_is_port_on_mpesw_device(priv) ? priv->mpesw_uplink : priv->master;
10838 : 0 : }
10839 : :
10840 : : /* Clears vport tag and mask used for HWS rules. */
10841 : : void
10842 : 0 : flow_hw_clear_port_info(struct rte_eth_dev *dev)
10843 : : {
10844 : 0 : uint16_t port_id = dev->data->port_id;
10845 : : struct flow_hw_port_info *info;
10846 : :
10847 : : MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
10848 : 0 : info = &mlx5_flow_hw_port_infos[port_id];
10849 : 0 : info->regc_mask = 0;
10850 : 0 : info->regc_value = 0;
10851 : 0 : info->is_wire = 0;
10852 : 0 : }
10853 : :
10854 : : static int
10855 : 0 : flow_hw_conntrack_destroy(struct rte_eth_dev *dev,
10856 : : uint32_t idx,
10857 : : struct rte_flow_error *error)
10858 : : {
10859 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10860 : 0 : struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
10861 : : struct mlx5_aso_ct_action *ct;
10862 : :
10863 [ # # ]: 0 : if (priv->shared_host)
10864 : 0 : return rte_flow_error_set(error, ENOTSUP,
10865 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10866 : : NULL,
10867 : :  "CT destruction is not allowed on a guest port");
10868 : 0 : ct = mlx5_ipool_get(pool->cts, idx);
10869 [ # # ]: 0 : if (!ct) {
10870 : 0 : return rte_flow_error_set(error, EINVAL,
10871 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10872 : : NULL,
10873 : : "Invalid CT destruction index");
10874 : : }
10875 : 0 : __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
10876 : : __ATOMIC_RELAXED);
10877 : 0 : mlx5_ipool_free(pool->cts, idx);
10878 : 0 : return 0;
10879 : : }
10880 : :
10881 : : static int
10882 : 0 : flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,
10883 : : struct rte_flow_action_conntrack *profile,
10884 : : void *user_data, bool push,
10885 : : struct rte_flow_error *error)
10886 : : {
10887 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10888 : 0 : struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
10889 : : struct mlx5_aso_ct_action *ct;
10890 : :
10891 [ # # ]: 0 : if (priv->shared_host)
10892 : 0 : return rte_flow_error_set(error, ENOTSUP,
10893 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10894 : : NULL,
10895 : :  "CT query is not allowed on a guest port");
10896 : 0 : ct = mlx5_ipool_get(pool->cts, idx);
10897 [ # # ]: 0 : if (!ct) {
10898 : 0 : return rte_flow_error_set(error, EINVAL,
10899 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10900 : : NULL,
10901 : : "Invalid CT query index");
10902 : : }
10903 : 0 : profile->peer_port = ct->peer;
10904 : 0 : profile->is_original_dir = ct->is_original;
10905 [ # # ]: 0 : if (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))
10906 : 0 : return rte_flow_error_set(error, EIO,
10907 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10908 : : NULL,
10909 : : "Failed to query CT context");
10910 : : return 0;
10911 : : }
10912 : :
10913 : :
10914 : : static int
10915 : 0 : flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
10916 : : const struct rte_flow_modify_conntrack *action_conf,
10917 : : uint32_t idx, void *user_data, bool push,
10918 : : struct rte_flow_error *error)
10919 : : {
10920 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10921 : 0 : struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
10922 : : struct mlx5_aso_ct_action *ct;
10923 : : const struct rte_flow_action_conntrack *new_prf;
10924 : : int ret = 0;
10925 : :
10926 [ # # ]: 0 : if (priv->shared_host)
10927 : 0 : return rte_flow_error_set(error, ENOTSUP,
10928 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10929 : : NULL,
10930 : :  "CT update is not allowed on a guest port");
10931 : 0 : ct = mlx5_ipool_get(pool->cts, idx);
10932 [ # # ]: 0 : if (!ct) {
10933 : 0 : return rte_flow_error_set(error, EINVAL,
10934 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10935 : : NULL,
10936 : : "Invalid CT update index");
10937 : : }
10938 : 0 : new_prf = &action_conf->new_ct;
10939 [ # # ]: 0 : if (action_conf->direction)
10940 : 0 : ct->is_original = !!new_prf->is_original_dir;
10941 [ # # ]: 0 : if (action_conf->state) {
10942 : : /* Only validate the profile when it needs to be updated. */
10943 : 0 : ret = mlx5_validate_action_ct(dev, new_prf, error);
10944 [ # # ]: 0 : if (ret)
10945 : : return ret;
10946 : 0 : ret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,
10947 : : user_data, push);
10948 [ # # ]: 0 : if (ret)
10949 : 0 : return rte_flow_error_set(error, EIO,
10950 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10951 : : NULL,
10952 : : "Failed to send CT context update WQE");
10953 [ # # ]: 0 : if (queue != MLX5_HW_INV_QUEUE)
10954 : : return 0;
10955 : : /* Block until ready or a failure in synchronous mode. */
10956 : 0 : ret = mlx5_aso_ct_available(priv->sh, queue, ct);
10957 [ # # ]: 0 : if (ret)
10958 : 0 : rte_flow_error_set(error, rte_errno,
10959 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10960 : : NULL,
10961 : :  "Timed out waiting for the CT update");
10962 : : }
10963 : : return ret;
10964 : : }
10965 : :
10966 : : static struct rte_flow_action_handle *
10967 : 0 : flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
10968 : : const struct rte_flow_action_conntrack *pro,
10969 : : void *user_data, bool push,
10970 : : struct rte_flow_error *error)
10971 : : {
10972 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10973 : 0 : struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
10974 : : struct mlx5_aso_ct_action *ct;
10975 : 0 : uint32_t ct_idx = 0;
10976 : : int ret;
10977 : : bool async = !!(queue != MLX5_HW_INV_QUEUE);
10978 : :
10979 [ # # ]: 0 : if (priv->shared_host) {
10980 : 0 : rte_flow_error_set(error, ENOTSUP,
10981 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10982 : : NULL,
10983 : :  "CT create is not allowed on a guest port");
10984 : 0 : return NULL;
10985 : : }
10986 [ # # ]: 0 : if (!pool) {
10987 : 0 : rte_flow_error_set(error, EINVAL,
10988 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10989 : : "CT is not enabled");
10990 : 0 : return 0;
10991 : : }
10992 : 0 : ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
10993 [ # # ]: 0 : if (!ct) {
10994 : 0 : rte_flow_error_set(error, rte_errno,
10995 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
10996 : : "Failed to allocate CT object");
10997 : 0 : return 0;
10998 : : }
10999 : 0 : ct->offset = ct_idx - 1;
11000 : 0 : ct->is_original = !!pro->is_original_dir;
11001 : 0 : ct->peer = pro->peer_port;
11002 : 0 : ct->pool = pool;
11003 [ # # ]: 0 : if (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {
11004 : 0 : mlx5_ipool_free(pool->cts, ct_idx);
11005 : 0 : rte_flow_error_set(error, EBUSY,
11006 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11007 : : "Failed to update CT");
11008 : 0 : return 0;
11009 : : }
11010 [ # # ]: 0 : if (!async) {
11011 : 0 : ret = mlx5_aso_ct_available(priv->sh, queue, ct);
11012 [ # # ]: 0 : if (ret) {
11013 : 0 : mlx5_ipool_free(pool->cts, ct_idx);
11014 : 0 : rte_flow_error_set(error, rte_errno,
11015 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11016 : : NULL,
11017 : :  "Timed out waiting for the CT update");
11018 : 0 : return 0;
11019 : : }
11020 : : }
11021 : 0 : return MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(ct_idx);
11022 : : }
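/*
 * A sketch of how an application reaches flow_hw_conntrack_create() through
 * the synchronous indirect action API; the profile values below are
 * arbitrary examples. In this synchronous path the PMD uses its internal
 * control queue (queue == MLX5_HW_INV_QUEUE) and blocks until the ASO
 * object is ready:
 *
 * @code{.c}
 * const struct rte_flow_action_conntrack ct_conf = {
 *     .peer_port = 0,
 *     .is_original_dir = 1,
 *     .enable = 1,
 *     .state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
 * };
 * const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * const struct rte_flow_action action = {
 *     .type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
 *     .conf = &ct_conf,
 * };
 * struct rte_flow_error error;
 * struct rte_flow_action_handle *handle =
 *     rte_flow_action_handle_create(port_id, &conf, &action, &error);
 * @endcode
 */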
11023 : :
11024 : : /**
11025 : : * Validate shared action.
11026 : : *
11027 : : * @param[in] dev
11028 : : * Pointer to the rte_eth_dev structure.
11029 : : * @param[in] queue
11030 : : * Which queue to be used.
11031 : : * @param[in] attr
11032 : : * Operation attribute.
11033 : : * @param[in] conf
11034 : : * Indirect action configuration.
11035 : : * @param[in] action
11036 : : * rte_flow action detail.
11037 : : * @param[in] user_data
11038 : : * Pointer to the user_data.
11039 : : * @param[out] error
11040 : : * Pointer to error structure.
11041 : : *
11042 : : * @return
11043 : : * 0 on success, otherwise negative errno value.
11044 : : */
11045 : : static int
11046 : 0 : flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
11047 : : const struct rte_flow_op_attr *attr,
11048 : : const struct rte_flow_indir_action_conf *conf,
11049 : : const struct rte_flow_action *action,
11050 : : void *user_data,
11051 : : struct rte_flow_error *error)
11052 : : {
11053 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11054 : :
11055 : : RTE_SET_USED(attr);
11056 : : RTE_SET_USED(queue);
11057 : : RTE_SET_USED(user_data);
11058 [ # # # # : 0 : switch (action->type) {
# # # ]
11059 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
11060 [ # # ]: 0 : if (!priv->hws_age_req)
11061 : 0 : return rte_flow_error_set(error, EINVAL,
11062 : : RTE_FLOW_ERROR_TYPE_ACTION,
11063 : : NULL,
11064 : : "aging pool not initialized");
11065 : : break;
11066 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
11067 [ # # ]: 0 : if (!priv->hws_cpool)
11068 : 0 : return rte_flow_error_set(error, EINVAL,
11069 : : RTE_FLOW_ERROR_TYPE_ACTION,
11070 : : NULL,
11071 : : "counters pool not initialized");
11072 : : break;
11073 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
11074 [ # # ]: 0 : if (priv->hws_ctpool == NULL)
11075 : 0 : return rte_flow_error_set(error, EINVAL,
11076 : : RTE_FLOW_ERROR_TYPE_ACTION,
11077 : : NULL,
11078 : : "CT pool not initialized");
11079 : 0 : return mlx5_validate_action_ct(dev, action->conf, error);
11080 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
11081 : 0 : return flow_hw_validate_action_meter_mark(dev, action, true, error);
11082 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
11083 : 0 : return flow_dv_action_validate(dev, conf, action, error);
11084 : : case RTE_FLOW_ACTION_TYPE_QUOTA:
11085 : : return 0;
11086 : 0 : default:
11087 : 0 : return rte_flow_error_set(error, ENOTSUP,
11088 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11089 : : "action type not supported");
11090 : : }
11091 : : return 0;
11092 : : }
11093 : :
11094 : : static __rte_always_inline bool
11095 : : flow_hw_action_push(const struct rte_flow_op_attr *attr)
11096 : : {
11097 [ # # # # : 0 : return attr ? !attr->postpone : true;
# # # # #
# # # #
# ]
11098 : : }
11099 : :
11100 : : static __rte_always_inline struct mlx5_hw_q_job *
11101 : : flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
11102 : : const struct rte_flow_action_handle *handle,
11103 : : void *user_data, void *query_data,
11104 : : enum mlx5_hw_job_type type,
11105 : : enum mlx5_hw_indirect_type indirect_type,
11106 : : struct rte_flow_error *error)
11107 : : {
11108 : : struct mlx5_hw_q_job *job;
11109 : :
11110 [ # # # # : 0 : if (queue == MLX5_HW_INV_QUEUE)
# # # # #
# # # #
# ]
11111 [ # # # # : 0 : queue = CTRL_QUEUE_ID(priv);
# # # # ]
11112 : : job = flow_hw_job_get(priv, queue);
11113 [ # # # # : 0 : if (!job) {
# # # # #
# # # # #
# # # # #
# # # #
# ]
11114 : 0 : rte_flow_error_set(error, ENOMEM,
11115 : : RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
11116 : :  "Action operation failed because the queue is full.");
11117 : 0 : return NULL;
11118 : : }
11119 : 0 : job->type = type;
11120 : 0 : job->action = handle;
11121 : 0 : job->user_data = user_data;
11122 : 0 : job->query.user = query_data;
11123 [ # # # # : 0 : job->indirect_type = indirect_type;
# # # # ]
11124 : 0 : return job;
11125 : : }
11126 : :
11127 : : struct mlx5_hw_q_job *
11128 [ # # ]: 0 : mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
11129 : : const struct rte_flow_action_handle *handle,
11130 : : void *user_data, void *query_data,
11131 : : enum mlx5_hw_job_type type,
11132 : : struct rte_flow_error *error)
11133 : : {
11134 : 0 : return flow_hw_action_job_init(priv, queue, handle, user_data, query_data,
11135 : : type, MLX5_HW_INDIRECT_TYPE_LEGACY, error);
11136 : : }
11137 : :
11138 : : static __rte_always_inline void
11139 : : flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
11140 : : struct mlx5_hw_q_job *job,
11141 : : bool push, bool aso, bool status)
11142 : : {
11143 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11144 : :
11145 [ # # ]: 0 : if (queue == MLX5_HW_INV_QUEUE)
11146 : 0 : queue = CTRL_QUEUE_ID(priv);
11147 [ # # # # : 0 : if (likely(status)) {
# # # # #
# # # ]
11148 : : /* 1. add new job to a queue */
11149 [ # # # # : 0 : if (!aso)
# # # # #
# ]
11150 [ # # # # : 0 : rte_ring_enqueue(push ?
# # # # #
# # # #
# ]
11151 : 0 : priv->hw_q[queue].indir_cq :
11152 : 0 : priv->hw_q[queue].indir_iq,
11153 : : job);
11154 : : /* 2. send pending jobs */
11155 [ # # # # : 0 : if (push)
# # # # #
# # # #
# ]
11156 : 0 : __flow_hw_push_action(dev, queue);
11157 : : } else {
11158 : : flow_hw_job_put(priv, job, queue);
11159 : : }
11160 : : }
11161 : :
11162 : : /**
11163 : : * Create shared action.
11164 : : *
11165 : : * @param[in] dev
11166 : : * Pointer to the rte_eth_dev structure.
11167 : : * @param[in] queue
11168 : : * Which queue to be used.
11169 : : * @param[in] attr
11170 : : * Operation attribute.
11171 : : * @param[in] conf
11172 : : * Indirect action configuration.
11173 : : * @param[in] action
11174 : : * rte_flow action detail.
11175 : : * @param[in] user_data
11176 : : * Pointer to the user_data.
11177 : : * @param[out] error
11178 : : * Pointer to error structure.
11179 : : *
11180 : : * @return
11181 : : * Action handle on success, NULL otherwise and rte_errno is set.
11182 : : */
11183 : : static struct rte_flow_action_handle *
11184 : 0 : flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
11185 : : const struct rte_flow_op_attr *attr,
11186 : : const struct rte_flow_indir_action_conf *conf,
11187 : : const struct rte_flow_action *action,
11188 : : void *user_data,
11189 : : struct rte_flow_error *error)
11190 : : {
11191 : : struct rte_flow_action_handle *handle = NULL;
11192 : : struct mlx5_hw_q_job *job = NULL;
11193 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
11194 : : const struct rte_flow_action_age *age;
11195 : : struct mlx5_aso_mtr *aso_mtr;
11196 : : cnt_id_t cnt_id;
11197 : : uint32_t age_idx;
11198 : : bool push = flow_hw_action_push(attr);
11199 : : bool aso = false;
11200 : 0 : bool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK;
11201 : :
11202 [ # # ]: 0 : if (attr || force_job) {
11203 : : job = flow_hw_action_job_init(priv, queue, NULL, user_data,
11204 : : NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
11205 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
11206 : : if (!job)
11207 : 0 : return NULL;
11208 : : }
11209 [ # # # # : 0 : switch (action->type) {
# # # ]
11210 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
11211 [ # # ]: 0 : if (priv->hws_strict_queue) {
11212 : 0 : struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
11213 : :
11214 [ # # ]: 0 : if (queue >= info->hw_q_age->nb_rings) {
11215 : 0 : rte_flow_error_set(error, EINVAL,
11216 : : RTE_FLOW_ERROR_TYPE_ACTION,
11217 : : NULL,
11218 : : "Invalid queue ID for indirect AGE.");
11219 : 0 : rte_errno = EINVAL;
11220 : 0 : return NULL;
11221 : : }
11222 : : }
11223 : 0 : age = action->conf;
11224 : 0 : age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
11225 : : 0, error);
11226 [ # # ]: 0 : if (age_idx == 0) {
11227 : 0 : rte_flow_error_set(error, ENODEV,
11228 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11229 : :  "AGE is not configured!");
11230 : : } else {
11231 : 0 : age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
11232 : : MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
11233 : 0 : handle =
11234 : 0 : (struct rte_flow_action_handle *)(uintptr_t)age_idx;
11235 : : }
11236 : : break;
11237 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
11238 [ # # ]: 0 : if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
11239 : 0 : rte_flow_error_set(error, ENODEV,
11240 : : RTE_FLOW_ERROR_TYPE_ACTION,
11241 : : NULL,
11242 : :  "counters are not configured!");
11243 : : else
11244 : 0 : handle = (struct rte_flow_action_handle *)
11245 : 0 : (uintptr_t)cnt_id;
11246 : : break;
11247 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
11248 : : aso = true;
11249 : 0 : handle = flow_hw_conntrack_create(dev, queue, action->conf, job,
11250 : : push, error);
11251 : 0 : break;
11252 [ # # ]: 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
11253 : : aso = true;
11254 : : aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push, error);
11255 : : if (!aso_mtr)
11256 : : break;
11257 : 0 : handle = (void *)(uintptr_t)job->action;
11258 : 0 : break;
11259 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
11260 : 0 : handle = flow_dv_action_create(dev, conf, action, error);
11261 : 0 : break;
11262 : 0 : case RTE_FLOW_ACTION_TYPE_QUOTA:
11263 : : aso = true;
11264 : 0 : handle = mlx5_quota_alloc(dev, queue, action->conf,
11265 : : job, push, error);
11266 : 0 : break;
11267 : 0 : default:
11268 : 0 : rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11269 : : NULL, "action type not supported");
11270 : 0 : break;
11271 : : }
11272 [ # # ]: 0 : if (job && !force_job) {
11273 : 0 : job->action = handle;
11274 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso,
11275 : : handle != NULL);
11276 : : }
11277 : : return handle;
11278 : : }
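/*
 * A sketch of the asynchronous path into flow_hw_action_handle_create():
 * the handle is returned immediately and the completion is retrieved later
 * with rte_flow_pull(). port_id, queue and the sizes below are placeholders:
 *
 * @code{.c}
 * const struct rte_flow_op_attr op_attr = { .postpone = 0 };
 * const struct rte_flow_action_age age_conf = { .timeout = 30 };
 * const struct rte_flow_action action = {
 *     .type = RTE_FLOW_ACTION_TYPE_AGE,
 *     .conf = &age_conf,
 * };
 * const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 * struct rte_flow_op_result res[8];
 * struct rte_flow_action_handle *handle;
 * struct rte_flow_error error;
 *
 * handle = rte_flow_async_action_handle_create(port_id, queue, &op_attr,
 *                                              &conf, &action, NULL, &error);
 * if (handle != NULL)
 *     (void)rte_flow_pull(port_id, queue, res, RTE_DIM(res), &error);
 * @endcode
 */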
11279 : :
11280 : : static int
11281 : 0 : mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,
11282 : : const struct rte_flow_update_meter_mark *upd_meter_mark,
11283 : : uint32_t idx, bool push,
11284 : : struct mlx5_hw_q_job *job, struct rte_flow_error *error)
11285 : : {
11286 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11287 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
11288 : : const struct rte_flow_action_meter_mark *meter_mark = &upd_meter_mark->meter_mark;
11289 : 0 : struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
11290 : : struct mlx5_flow_meter_info *fm;
11291 : :
11292 [ # # ]: 0 : if (!aso_mtr)
11293 : 0 : return rte_flow_error_set(error, EINVAL,
11294 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11295 : : NULL, "Invalid meter_mark update index");
11296 : : fm = &aso_mtr->fm;
11297 [ # # ]: 0 : if (upd_meter_mark->profile_valid)
11298 : 0 : fm->profile = (struct mlx5_flow_meter_profile *)
11299 : 0 : (meter_mark->profile);
11300 [ # # ]: 0 : if (upd_meter_mark->color_mode_valid)
11301 : 0 : fm->color_aware = meter_mark->color_mode;
11302 [ # # ]: 0 : if (upd_meter_mark->state_valid)
11303 : 0 : fm->is_enable = meter_mark->state;
11304 [ # # ]: 0 : aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
11305 : : ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
11306 : : /* Update ASO flow meter by wqe. */
11307 [ # # ]: 0 : if (mlx5_aso_meter_update_by_wqe(priv, queue,
11308 : : aso_mtr, &priv->mtr_bulk, job, push))
11309 : 0 : return rte_flow_error_set(error, EINVAL,
11310 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11311 : : NULL, "Unable to update ASO meter WQE");
11312 : : /* Wait for ASO object completion. */
11313 [ # # # # ]: 0 : if (queue == MLX5_HW_INV_QUEUE &&
11314 : 0 : mlx5_aso_mtr_wait(priv, aso_mtr, true))
11315 : 0 : return rte_flow_error_set(error, EINVAL,
11316 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11317 : : NULL, "Unable to wait for ASO meter CQE");
11318 : : return 0;
11319 : : }
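/*
 * A sketch of updating a METER_MARK indirect action through
 * rte_flow_action_handle_update(), which lands in the function above. Only
 * the meter state is toggled here; other fields stay untouched because
 * their *_valid bits are left clear. handle, port_id and handle_error()
 * are placeholders:
 *
 * @code{.c}
 * const struct rte_flow_update_meter_mark update = {
 *     .meter_mark = { .state = 1 }, // re-enable the meter
 *     .state_valid = 1,
 * };
 * struct rte_flow_error error;
 *
 * if (rte_flow_action_handle_update(port_id, handle, &update, &error) < 0)
 *     handle_error(&error); // hypothetical error handler
 * @endcode
 */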
11320 : :
11321 : : /**
11322 : : * Update shared action.
11323 : : *
11324 : : * @param[in] dev
11325 : : * Pointer to the rte_eth_dev structure.
11326 : : * @param[in] queue
11327 : : * Which queue to be used.
11328 : : * @param[in] attr
11329 : : * Operation attribute.
11330 : : * @param[in] handle
11331 : : * Action handle to be updated.
11332 : : * @param[in] update
11333 : : * Update value.
11334 : : * @param[in] user_data
11335 : : * Pointer to the user_data.
11336 : : * @param[out] error
11337 : : * Pointer to error structure.
11338 : : *
11339 : : * @return
11340 : : * 0 on success, negative value otherwise and rte_errno is set.
11341 : : */
11342 : : static int
11343 : 0 : flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
11344 : : const struct rte_flow_op_attr *attr,
11345 : : struct rte_flow_action_handle *handle,
11346 : : const void *update,
11347 : : void *user_data,
11348 : : struct rte_flow_error *error)
11349 : : {
11350 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11351 : : const struct rte_flow_modify_conntrack *ct_conf =
11352 : : (const struct rte_flow_modify_conntrack *)update;
11353 : : struct mlx5_hw_q_job *job = NULL;
11354 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)handle;
11355 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
11356 [ # # ]: 0 : uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
11357 : : int ret = 0;
11358 : : bool push = flow_hw_action_push(attr);
11359 : : bool aso = false;
11360 : 0 : bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
11361 : :
11362 [ # # ]: 0 : if (attr || force_job) {
11363 : : job = flow_hw_action_job_init(priv, queue, handle, user_data,
11364 : : NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
11365 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
11366 : : if (!job)
11367 : 0 : return -rte_errno;
11368 : : }
11369 [ # # # # : 0 : switch (type) {
# # ]
11370 : 0 : case MLX5_INDIRECT_ACTION_TYPE_AGE:
11371 : 0 : ret = mlx5_hws_age_action_update(priv, idx, update, error);
11372 : 0 : break;
11373 : 0 : case MLX5_INDIRECT_ACTION_TYPE_CT:
11374 [ # # ]: 0 : if (ct_conf->state)
11375 : : aso = true;
11376 : 0 : ret = flow_hw_conntrack_update(dev, queue, update, idx,
11377 : : job, push, error);
11378 : 0 : break;
11379 : 0 : case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
11380 : : aso = true;
11381 : 0 : ret = mlx5_flow_update_meter_mark(dev, queue, update, idx, push,
11382 : : job, error);
11383 : 0 : break;
11384 : 0 : case MLX5_INDIRECT_ACTION_TYPE_RSS:
11385 : 0 : ret = flow_dv_action_update(dev, handle, update, error);
11386 : 0 : break;
11387 : 0 : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
11388 : : aso = true;
11389 : 0 : ret = mlx5_quota_query_update(dev, queue, handle, update, NULL,
11390 : : job, push, error);
11391 : 0 : break;
11392 : 0 : default:
11393 : : ret = -ENOTSUP;
11394 : 0 : rte_flow_error_set(error, ENOTSUP,
11395 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11396 : : "action type not supported");
11397 : 0 : break;
11398 : : }
11399 [ # # ]: 0 : if (job && !force_job)
11400 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
11401 : : return ret;
11402 : : }
11403 : :
11404 : : /**
11405 : : * Destroy shared action.
11406 : : *
11407 : : * @param[in] dev
11408 : : * Pointer to the rte_eth_dev structure.
11409 : : * @param[in] queue
11410 : : * Which queue to be used.
11411 : : * @param[in] attr
11412 : : * Operation attribute.
11413 : : * @param[in] handle
11414 : : * Action handle to be destroyed.
11415 : : * @param[in] user_data
11416 : : * Pointer to the user_data.
11417 : : * @param[out] error
11418 : : * Pointer to error structure.
11419 : : *
11420 : : * @return
11421 : : * 0 on success, negative value otherwise and rte_errno is set.
11422 : : */
11423 : : static int
11424 : 0 : flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
11425 : : const struct rte_flow_op_attr *attr,
11426 : : struct rte_flow_action_handle *handle,
11427 : : void *user_data,
11428 : : struct rte_flow_error *error)
11429 : : {
11430 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)handle;
11431 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
11432 : 0 : uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
11433 : : uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
11434 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11435 [ # # ]: 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
11436 : : struct mlx5_hw_q_job *job = NULL;
11437 : : struct mlx5_aso_mtr *aso_mtr;
11438 : : struct mlx5_flow_meter_info *fm;
11439 : : bool push = flow_hw_action_push(attr);
11440 : : bool aso = false;
11441 : : int ret = 0;
11442 : 0 : bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
11443 : :
11444 [ # # ]: 0 : if (attr || force_job) {
11445 : : job = flow_hw_action_job_init(priv, queue, handle, user_data,
11446 : : NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
11447 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
11448 : : if (!job)
11449 : 0 : return -rte_errno;
11450 : : }
11451 [ # # # # : 0 : switch (type) {
# # # ]
11452 : 0 : case MLX5_INDIRECT_ACTION_TYPE_AGE:
11453 : 0 : ret = mlx5_hws_age_action_destroy(priv, age_idx, error);
11454 : 0 : break;
11455 : 0 : case MLX5_INDIRECT_ACTION_TYPE_COUNT:
11456 [ # # ]: 0 : age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
11457 [ # # ]: 0 : if (age_idx != 0)
11458 : : /*
11459 : :  * If this counter belongs to an indirect AGE action,
11460 : :  * this is the time to update the AGE.
11461 : : */
11462 : : mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
11463 [ # # ]: 0 : mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
11464 : : break;
11465 : 0 : case MLX5_INDIRECT_ACTION_TYPE_CT:
11466 : 0 : ret = flow_hw_conntrack_destroy(dev, idx, error);
11467 : 0 : break;
11468 : 0 : case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
11469 : 0 : aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
11470 [ # # ]: 0 : if (!aso_mtr) {
11471 : : ret = -EINVAL;
11472 : 0 : rte_flow_error_set(error, EINVAL,
11473 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11474 : : NULL, "Invalid meter_mark destroy index");
11475 : 0 : break;
11476 : : }
11477 : : fm = &aso_mtr->fm;
11478 : 0 : fm->is_enable = 0;
11479 : : /* Update ASO flow meter by wqe. */
11480 [ # # ]: 0 : if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
11481 : : &priv->mtr_bulk, job, push)) {
11482 : : ret = -EINVAL;
11483 : 0 : rte_flow_error_set(error, EINVAL,
11484 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11485 : : NULL, "Unable to update ASO meter WQE");
11486 : 0 : break;
11487 : : }
11488 : : /* Wait for ASO object completion. */
11489 [ # # # # ]: 0 : if (queue == MLX5_HW_INV_QUEUE &&
11490 : 0 : mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
11491 : : ret = -EINVAL;
11492 : 0 : rte_flow_error_set(error, EINVAL,
11493 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11494 : : NULL, "Unable to wait for ASO meter CQE");
11495 : 0 : break;
11496 : : }
11497 : : aso = true;
11498 : : break;
11499 : 0 : case MLX5_INDIRECT_ACTION_TYPE_RSS:
11500 : 0 : ret = flow_dv_action_destroy(dev, handle, error);
11501 : 0 : break;
11502 : : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
11503 : : break;
11504 : 0 : default:
11505 : : ret = -ENOTSUP;
11506 : 0 : rte_flow_error_set(error, ENOTSUP,
11507 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11508 : : "action type not supported");
11509 : 0 : break;
11510 : : }
11511 [ # # ]: 0 : if (job && !force_job)
11512 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
11513 : : return ret;
11514 : : }
11515 : :
11516 : : static int
11517 : 0 : flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
11518 : : void *data, struct rte_flow_error *error)
11519 : : {
11520 : : struct mlx5_hws_cnt_pool *hpool;
11521 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
11522 : : struct mlx5_hws_cnt *cnt;
11523 : : struct rte_flow_query_count *qc = data;
11524 : : uint32_t iidx;
11525 : : uint64_t pkts, bytes;
11526 : :
11527 [ # # ]: 0 : if (!mlx5_hws_cnt_id_valid(counter))
11528 : 0 : return rte_flow_error_set(error, EINVAL,
11529 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11530 : :  "counters are not available");
11531 [ # # ]: 0 : hpool = mlx5_hws_cnt_host_pool(priv->hws_cpool);
11532 : : iidx = mlx5_hws_cnt_iidx(hpool, counter);
11533 : 0 : cnt = &hpool->pool[iidx];
11534 : : __hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
11535 : 0 : qc->hits_set = 1;
11536 : 0 : qc->bytes_set = 1;
11537 : 0 : qc->hits = pkts - cnt->reset.hits;
11538 : 0 : qc->bytes = bytes - cnt->reset.bytes;
11539 [ # # ]: 0 : if (qc->reset) {
11540 : 0 : cnt->reset.bytes = bytes;
11541 : 0 : cnt->reset.hits = pkts;
11542 : : }
11543 : : return 0;
11544 : : }
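/*
 * flow_hw_query_counter() reports hits/bytes relative to the last reset
 * baseline stored in the counter object. A sketch of a query that also
 * resets that baseline so the next query starts from zero (port_id and
 * flow are placeholders):
 *
 * @code{.c}
 * struct rte_flow_query_count qc = { .reset = 1 };
 * const struct rte_flow_action count_action = {
 *     .type = RTE_FLOW_ACTION_TYPE_COUNT,
 * };
 * struct rte_flow_error error;
 *
 * if (rte_flow_query(port_id, flow, &count_action, &qc, &error) == 0 &&
 *     qc.hits_set)
 *     printf("hits since last reset: %" PRIu64 "\n", qc.hits);
 * @endcode
 */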
11545 : :
11546 : : /**
11547 : : * Query a flow rule AGE action for aging information.
11548 : : *
11549 : : * @param[in] dev
11550 : : * Pointer to Ethernet device.
11551 : : * @param[in] age_idx
11552 : : * Index of AGE action parameter.
11553 : : * @param[out] data
11554 : : * Data retrieved by the query.
11555 : : * @param[out] error
11556 : : * Perform verbose error reporting if not NULL.
11557 : : *
11558 : : * @return
11559 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
11560 : : */
11561 : : static int
11562 : 0 : flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
11563 : : struct rte_flow_error *error)
11564 : : {
11565 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11566 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
11567 : 0 : struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
11568 : 0 : struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
11569 : : struct rte_flow_query_age *resp = data;
11570 : :
11571 [ # # # # ]: 0 : if (!param || !param->timeout)
11572 : 0 : return rte_flow_error_set(error, EINVAL,
11573 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11574 : : NULL, "age data not available");
11575 [ # # # ]: 0 : switch (__atomic_load_n(¶m->state, __ATOMIC_RELAXED)) {
11576 : 0 : case HWS_AGE_AGED_OUT_REPORTED:
11577 : : case HWS_AGE_AGED_OUT_NOT_REPORTED:
11578 : 0 : resp->aged = 1;
11579 : 0 : break;
11580 : 0 : case HWS_AGE_CANDIDATE:
11581 : : case HWS_AGE_CANDIDATE_INSIDE_RING:
11582 : 0 : resp->aged = 0;
11583 : 0 : break;
11584 : : case HWS_AGE_FREE:
11585 : : /*
11586 : : * When state is FREE the flow itself should be invalid.
11587 : : * Fall-through.
11588 : : */
11589 : : default:
11590 : : MLX5_ASSERT(0);
11591 : : break;
11592 : : }
11593 : 0 : resp->sec_since_last_hit_valid = !resp->aged;
11594 [ # # ]: 0 : if (resp->sec_since_last_hit_valid)
11595 : 0 : resp->sec_since_last_hit = __atomic_load_n
11596 : 0 : (¶m->sec_since_last_hit, __ATOMIC_RELAXED);
11597 : : return 0;
11598 : : }
11599 : :
11600 : : static int
11601 : 0 : flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
11602 : : const struct rte_flow_action *actions, void *data,
11603 : : struct rte_flow_error *error)
11604 : : {
11605 : : int ret = -EINVAL;
11606 : : struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
11607 : : struct rte_flow_hw_aux *aux;
11608 : :
11609 [ # # ]: 0 : for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11610 [ # # # # ]: 0 : switch (actions->type) {
11611 : : case RTE_FLOW_ACTION_TYPE_VOID:
11612 : : break;
11613 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
11614 [ # # ]: 0 : if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID))
11615 : 0 : return rte_flow_error_set(error, EINVAL,
11616 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11617 : : "counter not defined in the rule");
11618 : 0 : ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
11619 : : error);
11620 : 0 : break;
11621 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
11622 [ # # ]: 0 : if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX))
11623 : 0 : return rte_flow_error_set(error, EINVAL,
11624 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11625 : : "age data not available");
11626 : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
11627 : 0 : ret = flow_hw_query_age(dev, mlx5_flow_hw_aux_get_age_idx(hw_flow, aux),
11628 : : data, error);
11629 : 0 : break;
11630 : 0 : default:
11631 : 0 : return rte_flow_error_set(error, ENOTSUP,
11632 : : RTE_FLOW_ERROR_TYPE_ACTION,
11633 : : actions,
11634 : : "action not supported");
11635 : : }
11636 : : }
11637 : : return ret;
11638 : : }
11639 : :
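      : : /*
      : :  * The dispatcher above backs the synchronous rte_flow_query() API.
      : :  * A minimal counter read sketch, assuming an existing rule created
      : :  * with a COUNT action:
      : :  *
      : :  * @code{.c}
      : :  * struct rte_flow_query_count count = { .reset = 1 };
      : :  * struct rte_flow_action q[] = {
      : :  *     { .type = RTE_FLOW_ACTION_TYPE_COUNT },
      : :  *     { .type = RTE_FLOW_ACTION_TYPE_END },
      : :  * };
      : :  * struct rte_flow_error error;
      : :  *
      : :  * if (rte_flow_query(port_id, flow, q, &count, &error) == 0 &&
      : :  *     count.hits_set && count.bytes_set)
      : :  *     printf("hits %" PRIu64 " bytes %" PRIu64 "\n",
      : :  *            count.hits, count.bytes);
      : :  * @endcode
      : :  */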
11640 : : /**
11641 : : * Validate indirect action.
11642 : : *
11643 : : * @param[in] dev
11644 : : * Pointer to the Ethernet device structure.
11645 : : * @param[in] conf
11646 : : * Shared action configuration.
11647 : : * @param[in] action
11648 : : * Action specification used to create indirect action.
11649 : : * @param[out] error
11650 : : * Perform verbose error reporting if not NULL. Initialized in case of
11651 : : * error only.
11652 : : *
11653 : : * @return
11654 : : * 0 on success, otherwise negative errno value.
11655 : : */
11656 : : static int
11657 : 0 : flow_hw_action_validate(struct rte_eth_dev *dev,
11658 : : const struct rte_flow_indir_action_conf *conf,
11659 : : const struct rte_flow_action *action,
11660 : : struct rte_flow_error *err)
11661 : : {
11662 : 0 : return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
11663 : : conf, action, NULL, err);
11664 : : }
11665 : :
11666 : : /**
11667 : : * Create indirect action.
11668 : : *
11669 : : * @param[in] dev
11670 : : * Pointer to the Ethernet device structure.
11671 : : * @param[in] conf
11672 : : * Shared action configuration.
11673 : : * @param[in] action
11674 : : * Action specification used to create indirect action.
11675 : : * @param[out] error
11676 : : * Perform verbose error reporting if not NULL. Initialized in case of
11677 : : * error only.
11678 : : *
11679 : : * @return
11680 : : * A valid shared action handle in case of success, NULL otherwise and
11681 : : * rte_errno is set.
11682 : : */
11683 : : static struct rte_flow_action_handle *
11684 : 0 : flow_hw_action_create(struct rte_eth_dev *dev,
11685 : : const struct rte_flow_indir_action_conf *conf,
11686 : : const struct rte_flow_action *action,
11687 : : struct rte_flow_error *err)
11688 : : {
11689 : 0 : return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
11690 : : NULL, conf, action, NULL, err);
11691 : : }
11692 : :
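      : : /*
      : :  * flow_hw_action_create() above, like the destroy/update wrappers
      : :  * below, serves the synchronous indirect action API by routing to the
      : :  * async variant with MLX5_HW_INV_QUEUE. A minimal application-side
      : :  * sketch creating a shared counter (port_id is assumed):
      : :  *
      : :  * @code{.c}
      : :  * struct rte_flow_indir_action_conf conf = { .ingress = 1 };
      : :  * struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
      : :  * struct rte_flow_error error;
      : :  * struct rte_flow_action_handle *handle;
      : :  *
      : :  * handle = rte_flow_action_handle_create(port_id, &conf, &count,
      : :  *                                        &error);
      : :  * @endcode
      : :  */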
11693 : : /**
11694 : : * Destroy the indirect action.
11695 : : * Release action related resources on the NIC and the memory.
11696 : :  * Lock free (mutex should be acquired by the caller).
11697 : : * Dispatcher for action type specific call.
11698 : : *
11699 : : * @param[in] dev
11700 : : * Pointer to the Ethernet device structure.
11701 : : * @param[in] handle
11702 : : * The indirect action object handle to be removed.
11703 : : * @param[out] error
11704 : : * Perform verbose error reporting if not NULL. Initialized in case of
11705 : : * error only.
11706 : : *
11707 : : * @return
11708 : : * 0 on success, otherwise negative errno value.
11709 : : */
11710 : : static int
11711 : 0 : flow_hw_action_destroy(struct rte_eth_dev *dev,
11712 : : struct rte_flow_action_handle *handle,
11713 : : struct rte_flow_error *error)
11714 : : {
11715 : 0 : return flow_hw_action_handle_destroy(dev, MLX5_HW_INV_QUEUE,
11716 : : NULL, handle, NULL, error);
11717 : : }
11718 : :
11719 : : /**
11720 : : * Updates in place shared action configuration.
11721 : : *
11722 : : * @param[in] dev
11723 : : * Pointer to the Ethernet device structure.
11724 : : * @param[in] handle
11725 : : * The indirect action object handle to be updated.
11726 : : * @param[in] update
11727 : : * Action specification used to modify the action pointed by *handle*.
11728 : :  * *update* could be of the same type as the action pointed to by the
11729 : :  * *handle* argument, or some other structure like a wrapper, depending on
11730 : :  * the indirect action type.
11731 : : * @param[out] error
11732 : : * Perform verbose error reporting if not NULL. Initialized in case of
11733 : : * error only.
11734 : : *
11735 : : * @return
11736 : : * 0 on success, otherwise negative errno value.
11737 : : */
11738 : : static int
11739 : 0 : flow_hw_action_update(struct rte_eth_dev *dev,
11740 : : struct rte_flow_action_handle *handle,
11741 : : const void *update,
11742 : : struct rte_flow_error *err)
11743 : : {
11744 : 0 : return flow_hw_action_handle_update(dev, MLX5_HW_INV_QUEUE,
11745 : : NULL, handle, update, NULL, err);
11746 : : }
11747 : :
11748 : : static int
11749 : 0 : flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
11750 : : const struct rte_flow_op_attr *attr,
11751 : : const struct rte_flow_action_handle *handle,
11752 : : void *data, void *user_data,
11753 : : struct rte_flow_error *error)
11754 : : {
11755 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11756 : : struct mlx5_hw_q_job *job = NULL;
11757 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)handle;
11758 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
11759 [ # # ]: 0 : uint32_t idx = MLX5_INDIRECT_ACTION_IDX_GET(handle);
11760 : : uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
11761 : : int ret;
11762 : : bool push = flow_hw_action_push(attr);
11763 : : bool aso = false;
11764 : :
11765 [ # # ]: 0 : if (attr) {
11766 : : job = flow_hw_action_job_init(priv, queue, handle, user_data,
11767 : : data, MLX5_HW_Q_JOB_TYPE_QUERY,
11768 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
11769 : : if (!job)
11770 : 0 : return -rte_errno;
11771 : : }
11772 [ # # # # : 0 : switch (type) {
# ]
11773 : 0 : case MLX5_INDIRECT_ACTION_TYPE_AGE:
11774 : 0 : ret = flow_hw_query_age(dev, age_idx, data, error);
11775 : 0 : break;
11776 : 0 : case MLX5_INDIRECT_ACTION_TYPE_COUNT:
11777 : 0 : ret = flow_hw_query_counter(dev, act_idx, data, error);
11778 : 0 : break;
11779 : 0 : case MLX5_INDIRECT_ACTION_TYPE_CT:
11780 : : aso = true;
11781 [ # # ]: 0 : if (job)
11782 : 0 : job->query.user = data;
11783 : 0 : ret = flow_hw_conntrack_query(dev, queue, idx, data,
11784 : : job, push, error);
11785 : 0 : break;
11786 : 0 : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
11787 : : aso = true;
11788 : 0 : ret = mlx5_quota_query(dev, queue, handle, data,
11789 : : job, push, error);
11790 : 0 : break;
11791 : 0 : default:
11792 : : ret = -ENOTSUP;
11793 : 0 : rte_flow_error_set(error, ENOTSUP,
11794 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11795 : : "action type not supported");
11796 : 0 : break;
11797 : : }
11798 [ # # ]: 0 : if (job)
11799 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
11800 : : return ret;
11801 : : }
11802 : :
11803 : : static int
11804 : 0 : flow_hw_async_action_handle_query_update
11805 : : (struct rte_eth_dev *dev, uint32_t queue,
11806 : : const struct rte_flow_op_attr *attr,
11807 : : struct rte_flow_action_handle *handle,
11808 : : const void *update, void *query,
11809 : : enum rte_flow_query_update_mode qu_mode,
11810 : : void *user_data, struct rte_flow_error *error)
11811 : : {
11812 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
11813 : : bool push = flow_hw_action_push(attr);
11814 : : bool aso = false;
11815 : : struct mlx5_hw_q_job *job = NULL;
11816 : : int ret = 0;
11817 : :
11818 [ # # ]: 0 : if (attr) {
11819 : : job = flow_hw_action_job_init(priv, queue, handle, user_data,
11820 : : query,
11821 : : MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
11822 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
11823 : : if (!job)
11824 : 0 : return -rte_errno;
11825 : : }
11826 [ # # ]: 0 : switch (MLX5_INDIRECT_ACTION_TYPE_GET(handle)) {
11827 : 0 : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
11828 [ # # ]: 0 : if (qu_mode != RTE_FLOW_QU_QUERY_FIRST) {
11829 : 0 : ret = rte_flow_error_set
11830 : : (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
11831 : : NULL, "quota action must query before update");
11832 : 0 : break;
11833 : : }
11834 : : aso = true;
11835 : 0 : ret = mlx5_quota_query_update(dev, queue, handle,
11836 : : update, query, job, push, error);
11837 : 0 : break;
11838 : 0 : default:
11839 : 0 : ret = rte_flow_error_set(error, ENOTSUP,
11840 : : RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "update and query not supported");
11841 : : }
11842 [ # # ]: 0 : if (job)
11843 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
11844 : : return ret;
11845 : : }
11846 : :
11847 : : static int
11848 : 0 : flow_hw_action_query(struct rte_eth_dev *dev,
11849 : : const struct rte_flow_action_handle *handle, void *data,
11850 : : struct rte_flow_error *error)
11851 : : {
11852 : 0 : return flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,
11853 : : handle, data, NULL, error);
11854 : : }
11855 : :
11856 : : static int
11857 : 0 : flow_hw_action_query_update(struct rte_eth_dev *dev,
11858 : : struct rte_flow_action_handle *handle,
11859 : : const void *update, void *query,
11860 : : enum rte_flow_query_update_mode qu_mode,
11861 : : struct rte_flow_error *error)
11862 : : {
11863 : 0 : return flow_hw_async_action_handle_query_update(dev, MLX5_HW_INV_QUEUE,
11864 : : NULL, handle, update,
11865 : : query, qu_mode, NULL,
11866 : : error);
11867 : : }
11868 : :
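      : : /*
      : :  * Only the QUOTA indirect action supports the combined query-and-update
      : :  * above, and only in RTE_FLOW_QU_QUERY_FIRST mode. A sketch through the
      : :  * synchronous wrapper; quota_handle is an assumed, previously created
      : :  * QUOTA action handle:
      : :  *
      : :  * @code{.c}
      : :  * struct rte_flow_update_quota upd = {
      : :  *     .op = RTE_FLOW_UPDATE_QUOTA_ADD,
      : :  *     .quota = 1 << 20,
      : :  * };
      : :  * struct rte_flow_query_quota last;
      : :  * struct rte_flow_error error;
      : :  *
      : :  * rte_flow_action_handle_query_update(port_id, quota_handle, &upd,
      : :  *                                     &last, RTE_FLOW_QU_QUERY_FIRST,
      : :  *                                     &error);
      : :  * @endcode
      : :  */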
11869 : : /**
11870 : : * Get aged-out flows of a given port on the given HWS flow queue.
11871 : : *
11872 : : * @param[in] dev
11873 : : * Pointer to the Ethernet device structure.
11874 : : * @param[in] queue_id
11875 : :  * Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE is not set.
11876 : : * @param[in, out] contexts
11877 : : * The address of an array of pointers to the aged-out flows contexts.
11878 : : * @param[in] nb_contexts
11879 : : * The length of context array pointers.
11880 : : * @param[out] error
11881 : : * Perform verbose error reporting if not NULL. Initialized in case of
11882 : : * error only.
11883 : : *
11884 : : * @return
11885 : :  * If nb_contexts is 0, return the number of all aged-out contexts.
11886 : :  * If nb_contexts is not 0, return the number of aged-out flows reported
11887 : :  * in the context array, otherwise negative errno value.
11888 : : */
11889 : : static int
11890 : 0 : flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
11891 : : void **contexts, uint32_t nb_contexts,
11892 : : struct rte_flow_error *error)
11893 : : {
11894 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11895 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
11896 : : struct rte_ring *r;
11897 : : int nb_flows = 0;
11898 : :
11899 [ # # ]: 0 : if (nb_contexts && !contexts)
11900 : 0 : return rte_flow_error_set(error, EINVAL,
11901 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11902 : : NULL, "empty context");
11903 [ # # ]: 0 : if (!priv->hws_age_req)
11904 : 0 : return rte_flow_error_set(error, ENOENT,
11905 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11906 : : NULL, "No aging initialized");
11907 [ # # ]: 0 : if (priv->hws_strict_queue) {
11908 [ # # ]: 0 : if (queue_id >= age_info->hw_q_age->nb_rings)
11909 : 0 : return rte_flow_error_set(error, EINVAL,
11910 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11911 : : NULL, "invalid queue id");
11912 : 0 : r = age_info->hw_q_age->aged_lists[queue_id];
11913 : : } else {
11914 : 0 : r = age_info->hw_age.aged_list;
11915 : 0 : MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
11916 : : }
11917 [ # # ]: 0 : if (nb_contexts == 0)
11918 : 0 : return rte_ring_count(r);
11919 [ # # ]: 0 : while ((uint32_t)nb_flows < nb_contexts) {
11920 : : uint32_t age_idx;
11921 : :
11922 : : if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
11923 : : break;
11924 : : /* get the AGE context if the aged-out index is still valid. */
11925 : 0 : contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
11926 [ # # ]: 0 : if (!contexts[nb_flows])
11927 : 0 : continue;
11928 : 0 : nb_flows++;
11929 : : }
11930 : : return nb_flows;
11931 : : }
11932 : :
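      : : /*
      : :  * Typical two-step retrieval through the public API: size the array
      : :  * with nb_contexts == 0 first, then fetch. A sketch with error
      : :  * handling trimmed:
      : :  *
      : :  * @code{.c}
      : :  * struct rte_flow_error error;
      : :  * int n = rte_flow_get_q_aged_flows(port_id, queue_id, NULL, 0, &error);
      : :  *
      : :  * if (n > 0) {
      : :  *     void **ctx = calloc(n, sizeof(*ctx));
      : :  *
      : :  *     n = rte_flow_get_q_aged_flows(port_id, queue_id, ctx, n, &error);
      : :  *     // ctx[0..n-1] hold AGE contexts; freed indexes are skipped
      : :  *     free(ctx);
      : :  * }
      : :  * @endcode
      : :  */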
11933 : : /**
11934 : : * Get aged-out flows.
11935 : : *
11936 : : * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
11937 : : *
11938 : : * @param[in] dev
11939 : : * Pointer to the Ethernet device structure.
11940 : : * @param[in] contexts
11941 : : * The address of an array of pointers to the aged-out flows contexts.
11942 : : * @param[in] nb_contexts
11943 : : * The length of context array pointers.
11944 : : * @param[out] error
11945 : : * Perform verbose error reporting if not NULL. Initialized in case of
11946 : : * error only.
11947 : : *
11948 : : * @return
11949 : :  * Number of contexts retrieved on success, otherwise negative errno value.
11950 : :  * If nb_contexts is 0, return the number of all aged-out contexts.
11951 : :  * If nb_contexts is not 0, return the number of aged-out flows reported
11952 : :  * in the context array.
11953 : : */
11954 : : static int
11955 : 0 : flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
11956 : : uint32_t nb_contexts, struct rte_flow_error *error)
11957 : : {
11958 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11959 : :
11960 [ # # ]: 0 : if (priv->hws_strict_queue)
11961 : 0 : DRV_LOG(WARNING,
11962 : : "port %u get aged flows called in strict queue mode.",
11963 : : dev->data->port_id);
11964 : 0 : return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
11965 : : }
11966 : :
11967 : : static void
11968 : 0 : mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
11969 : : struct mlx5_mirror_clone *clone)
11970 : : {
11971 [ # # # ]: 0 : switch (clone->type) {
11972 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
11973 : : case RTE_FLOW_ACTION_TYPE_QUEUE:
11974 : 0 : mlx5_hrxq_release(dev,
11975 : 0 : ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
11976 : 0 : break;
11977 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
11978 : 0 : flow_hw_jump_release(dev, clone->action_ctx);
11979 : : break;
11980 : : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
11981 : : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
11982 : : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
11983 : : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
11984 : : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
11985 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
11986 : : default:
11987 : : break;
11988 : : }
11989 : 0 : }
11990 : :
11991 : : void
11992 [ # # ]: 0 : mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
11993 : : {
11994 : : uint32_t i;
11995 : :
11996 : : mlx5_indirect_list_remove_entry(&mirror->indirect);
11997 [ # # ]: 0 : for (i = 0; i < mirror->clones_num; i++)
11998 : 0 : mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
11999 [ # # ]: 0 : if (mirror->mirror_action)
12000 : 0 : mlx5dr_action_destroy(mirror->mirror_action);
12001 : 0 : mlx5_free(mirror);
12002 : 0 : }
12003 : :
12004 : : static __rte_always_inline bool
12005 : : mlx5_mirror_terminal_action(const struct rte_flow_action *action)
12006 : : {
12007 : 0 : switch (action->type) {
12008 : : case RTE_FLOW_ACTION_TYPE_JUMP:
12009 : : case RTE_FLOW_ACTION_TYPE_RSS:
12010 : : case RTE_FLOW_ACTION_TYPE_QUEUE:
12011 : : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12012 : : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
12013 : : return true;
12014 : : default:
12015 : : break;
12016 : : }
12017 : : return false;
12018 : : }
12019 : :
12020 : : static bool
12021 : 0 : mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
12022 : : const struct rte_flow_attr *flow_attr,
12023 : : const struct rte_flow_action *action)
12024 : : {
12025 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12026 : : const struct rte_flow_action_ethdev *port = NULL;
12027 [ # # # # ]: 0 : bool is_proxy = MLX5_HW_PORT_IS_PROXY(priv);
12028 : :
12029 [ # # ]: 0 : if (!action)
12030 : : return false;
12031 [ # # # # ]: 0 : switch (action->type) {
12032 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
12033 : : case RTE_FLOW_ACTION_TYPE_RSS:
12034 [ # # ]: 0 : if (flow_attr->transfer)
12035 : 0 : return false;
12036 : : break;
12037 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
12038 [ # # # # ]: 0 : if (!is_proxy || !flow_attr->transfer)
12039 : : return false;
12040 : 0 : port = action->conf;
12041 [ # # # # ]: 0 : if (!port || port->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
12042 : 0 : return false;
12043 : : break;
12044 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12045 : : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12046 : : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12047 : : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12048 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12049 [ # # # # ]: 0 : if (!is_proxy || !flow_attr->transfer)
12050 : : return false;
12051 [ # # ]: 0 : if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
12052 [ # # ]: 0 : action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
12053 : 0 : return false;
12054 : : break;
12055 : : default:
12056 : : return false;
12057 : : }
12058 : : return true;
12059 : : }
12060 : :
12061 : : /**
12062 : :  * A valid mirror actions list includes one or two SAMPLE actions
12063 : :  * followed by a terminal action such as JUMP.
12064 : :  *
12065 : :  * @return
12066 : :  * Number of actions in the list if the *actions* list was valid,
12067 : :  * -EINVAL otherwise.
12068 : : */
12069 : : static int
12070 : 0 : mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
12071 : : const struct rte_flow_attr *flow_attr,
12072 : : const struct rte_flow_action *actions)
12073 : : {
12074 [ # # ]: 0 : if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
12075 : : int i = 1;
12076 : : bool valid;
12077 : 0 : const struct rte_flow_action_sample *sample = actions[0].conf;
12078 : 0 : valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
12079 : 0 : sample->actions);
12080 [ # # ]: 0 : if (!valid)
12081 : : return -EINVAL;
12082 [ # # ]: 0 : if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
12083 : : i = 2;
12084 : 0 : sample = actions[1].conf;
12085 : 0 : valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
12086 : 0 : sample->actions);
12087 [ # # ]: 0 : if (!valid)
12088 : : return -EINVAL;
12089 : : }
12090 [ # # ]: 0 : return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
12091 : : }
12092 : : return -EINVAL;
12093 : : }
12094 : :
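      : : /*
      : :  * Example of an actions list that passes the validation above: one
      : :  * SAMPLE clone plus a terminating JUMP (configuration values are
      : :  * illustrative):
      : :  *
      : :  * @code{.c}
      : :  * struct rte_flow_action_queue mirror_queue = { .index = 1 };
      : :  * struct rte_flow_action clone[] = {
      : :  *     { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &mirror_queue },
      : :  *     { .type = RTE_FLOW_ACTION_TYPE_END },
      : :  * };
      : :  * struct rte_flow_action_sample sample = {
      : :  *     .ratio = 1, .actions = clone,
      : :  * };
      : :  * struct rte_flow_action_jump jump = { .group = 1 };
      : :  * struct rte_flow_action list[] = {
      : :  *     { .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
      : :  *     { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
      : :  *     { .type = RTE_FLOW_ACTION_TYPE_END },
      : :  * };
      : :  * @endcode
      : :  */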
12095 : : static int
12096 [ # # ]: 0 : mirror_format_tir(struct rte_eth_dev *dev,
12097 : : struct mlx5_mirror_clone *clone,
12098 : : const struct mlx5_flow_template_table_cfg *table_cfg,
12099 : : const struct rte_flow_action *action,
12100 : : struct mlx5dr_action_dest_attr *dest_attr,
12101 : : struct rte_flow_error *error)
12102 : : {
12103 : : uint32_t hws_flags;
12104 : : enum mlx5dr_table_type table_type;
12105 : : struct mlx5_hrxq *tir_ctx;
12106 : :
12107 : : table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
12108 : 0 : hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
12109 : 0 : tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
12110 [ # # ]: 0 : if (!tir_ctx)
12111 : 0 : return rte_flow_error_set(error, EINVAL,
12112 : : RTE_FLOW_ERROR_TYPE_ACTION,
12113 : : action, "failed to create QUEUE action for mirror clone");
12114 : 0 : dest_attr->dest = tir_ctx->action;
12115 : 0 : clone->action_ctx = tir_ctx;
12116 : 0 : return 0;
12117 : : }
12118 : :
12119 : : static int
12120 : 0 : mirror_format_jump(struct rte_eth_dev *dev,
12121 : : struct mlx5_mirror_clone *clone,
12122 : : const struct mlx5_flow_template_table_cfg *table_cfg,
12123 : : const struct rte_flow_action *action,
12124 : : struct mlx5dr_action_dest_attr *dest_attr,
12125 : : struct rte_flow_error *error)
12126 : : {
12127 : 0 : const struct rte_flow_action_jump *jump_conf = action->conf;
12128 : 0 : struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
12129 : : (dev, table_cfg,
12130 : 0 : jump_conf->group, error);
12131 : :
12132 [ # # ]: 0 : if (!jump)
12133 : 0 : return rte_flow_error_set(error, EINVAL,
12134 : : RTE_FLOW_ERROR_TYPE_ACTION,
12135 : : action, "failed to create JUMP action for mirror clone");
12136 : 0 : dest_attr->dest = jump->hws_action;
12137 : 0 : clone->action_ctx = jump;
12138 : 0 : return 0;
12139 : : }
12140 : :
12141 : : static int
12142 : : mirror_format_port(struct rte_eth_dev *dev,
12143 : : const struct rte_flow_action *action,
12144 : : struct mlx5dr_action_dest_attr *dest_attr,
12145 : : struct rte_flow_error __rte_unused *error)
12146 : : {
12147 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12148 : 0 : const struct rte_flow_action_ethdev *port_action = action->conf;
12149 : :
12150 : 0 : dest_attr->dest = priv->hw_vport[port_action->port_id];
12151 : : return 0;
12152 : : }
12153 : :
12154 : : static int
12155 : 0 : hw_mirror_clone_reformat(const struct rte_flow_action *actions,
12156 : : struct mlx5dr_action_dest_attr *dest_attr,
12157 : : enum mlx5dr_action_type *action_type,
12158 : : uint8_t *reformat_buf, bool decap)
12159 : : {
12160 : : int ret;
12161 : : const struct rte_flow_item *encap_item = NULL;
12162 : : const struct rte_flow_action_raw_encap *encap_conf = NULL;
12163 : : typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
12164 : :
12165 [ # # # # ]: 0 : switch (actions[0].type) {
12166 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12167 : 0 : encap_conf = actions[0].conf;
12168 : 0 : break;
12169 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12170 : 0 : encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
12171 : : actions);
12172 : 0 : break;
12173 : 0 : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12174 : 0 : encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
12175 : : actions);
12176 : 0 : break;
12177 : : default:
12178 : : return -EINVAL;
12179 : : }
12180 : 0 : *action_type = decap ?
12181 [ # # ]: 0 : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
12182 : : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
12183 [ # # ]: 0 : if (encap_item) {
12184 : 0 : ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
12185 : : &reformat->reformat_data_sz, NULL);
12186 [ # # ]: 0 : if (ret)
12187 : : return -EINVAL;
12188 : 0 : reformat->reformat_data = reformat_buf;
12189 : : } else {
12190 : 0 : reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
12191 : 0 : reformat->reformat_data_sz = encap_conf->size;
12192 : : }
12193 : : return 0;
12194 : : }
12195 : :
12196 : : static int
12197 : 0 : hw_mirror_format_clone(struct rte_eth_dev *dev,
12198 : : struct mlx5_mirror_clone *clone,
12199 : : const struct mlx5_flow_template_table_cfg *table_cfg,
12200 : : const struct rte_flow_action *actions,
12201 : : struct mlx5dr_action_dest_attr *dest_attr,
12202 : : uint8_t *reformat_buf, struct rte_flow_error *error)
12203 : : {
12204 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12205 : : int ret;
12206 : : uint32_t i;
12207 : : bool decap_seen = false;
12208 : :
12209 [ # # ]: 0 : for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
12210 : 0 : dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
12211 [ # # # # : 0 : switch (actions[i].type) {
# # # ]
12212 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
12213 : : case RTE_FLOW_ACTION_TYPE_RSS:
12214 : 0 : ret = mirror_format_tir(dev, clone, table_cfg,
12215 : : &actions[i], dest_attr, error);
12216 [ # # ]: 0 : if (ret)
12217 : 0 : return ret;
12218 : : break;
12219 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
12220 : : ret = mirror_format_port(dev, &actions[i],
12221 : : dest_attr, error);
12222 : : if (ret)
12223 : : return ret;
12224 : : break;
12225 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
12226 : 0 : ret = mirror_format_jump(dev, clone, table_cfg,
12227 : : &actions[i], dest_attr, error);
12228 [ # # ]: 0 : if (ret)
12229 : 0 : return ret;
12230 : : break;
12231 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
12232 : 0 : dest_attr->dest = priv->hw_def_miss;
12233 : 0 : break;
12234 : : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12235 : : decap_seen = true;
12236 : : break;
12237 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12238 : : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
12239 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
12240 : 0 : ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
12241 : : &dest_attr->action_type[i],
12242 : : reformat_buf, decap_seen);
12243 [ # # ]: 0 : if (ret < 0)
12244 : 0 : return rte_flow_error_set(error, EINVAL,
12245 : : RTE_FLOW_ERROR_TYPE_ACTION,
12246 : : &actions[i],
12247 : : "failed to create reformat action");
12248 : : break;
12249 : 0 : default:
12250 : 0 : return rte_flow_error_set(error, EINVAL,
12251 : : RTE_FLOW_ERROR_TYPE_ACTION,
12252 : : &actions[i], "unsupported sample action");
12253 : : }
12254 : 0 : clone->type = actions->type;
12255 : : }
12256 : 0 : dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
12257 : 0 : return 0;
12258 : : }
12259 : :
12260 : : static struct rte_flow_action_list_handle *
12261 : 0 : mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
12262 : : const struct mlx5_flow_template_table_cfg *table_cfg,
12263 : : const struct rte_flow_action *actions,
12264 : : struct rte_flow_error *error)
12265 : : {
12266 : : uint32_t hws_flags;
12267 : : int ret = 0, i, clones_num;
12268 : : struct mlx5_mirror *mirror;
12269 : : enum mlx5dr_table_type table_type;
12270 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12271 [ # # ]: 0 : const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
12272 : : uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
12273 : : struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
12274 : : enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
12275 : : [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
12276 : :
12277 : : memset(mirror_attr, 0, sizeof(mirror_attr));
12278 : : memset(array_action_types, 0, sizeof(array_action_types));
12279 : : table_type = get_mlx5dr_table_type(flow_attr);
12280 : 0 : hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
12281 : 0 : clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
12282 : : actions);
12283 [ # # ]: 0 : if (clones_num < 0) {
12284 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12285 : : actions, "Invalid mirror list format");
12286 : 0 : return NULL;
12287 : : }
12288 : 0 : mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
12289 : : 0, SOCKET_ID_ANY);
12290 [ # # ]: 0 : if (!mirror) {
12291 : 0 : rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
12292 : : actions, "Failed to allocate mirror context");
12293 : 0 : return NULL;
12294 : : }
12295 : :
12296 : 0 : mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
12297 : 0 : mirror->clones_num = clones_num;
12298 [ # # ]: 0 : for (i = 0; i < clones_num; i++) {
12299 : : const struct rte_flow_action *clone_actions;
12300 : :
12301 : 0 : mirror_attr[i].action_type = array_action_types[i];
12302 [ # # ]: 0 : if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
12303 : 0 : const struct rte_flow_action_sample *sample = actions[i].conf;
12304 : :
12305 : 0 : clone_actions = sample->actions;
12306 : : } else {
12307 : : clone_actions = &actions[i];
12308 : : }
12309 : 0 : ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
12310 : : clone_actions, &mirror_attr[i],
12311 : 0 : reformat_buf[i], error);
12312 : :
12313 [ # # ]: 0 : if (ret)
12314 : 0 : goto error;
12315 : : }
12316 : 0 : hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
12317 : 0 : mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
12318 : : clones_num,
12319 : : mirror_attr,
12320 : : hws_flags);
12321 [ # # ]: 0 : if (!mirror->mirror_action) {
12322 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12323 : : actions, "Failed to create HWS mirror action");
12324 : 0 : goto error;
12325 : : }
12326 : :
12327 [ # # ]: 0 : mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
12328 : 0 : return (struct rte_flow_action_list_handle *)mirror;
12329 : :
12330 : 0 : error:
12331 : 0 : mlx5_hw_mirror_destroy(dev, mirror);
12332 : 0 : return NULL;
12333 : : }
12334 : :
12335 : : void
12336 : 0 : mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
12337 : : struct mlx5_indirect_list *ptr)
12338 : : {
12339 : : struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
12340 : :
12341 : : switch (obj->legacy_type) {
12342 : : case RTE_FLOW_ACTION_TYPE_METER_MARK:
12343 : : break; /* ASO meters were released in mlx5_flow_meter_flush() */
12344 : : default:
12345 : : break;
12346 : : }
12347 : 0 : mlx5_free(obj);
12348 : 0 : }
12349 : :
12350 : : static struct rte_flow_action_list_handle *
12351 : 0 : mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
12352 : : const struct rte_flow_op_attr *attr,
12353 : : const struct rte_flow_indir_action_conf *conf,
12354 : : const struct rte_flow_action *actions,
12355 : : void *user_data, struct rte_flow_error *error)
12356 : : {
12357 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12358 : 0 : struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
12359 : : sizeof(*indlst_obj),
12360 : : 0, SOCKET_ID_ANY);
12361 : :
12362 [ # # ]: 0 : if (!indlst_obj)
12363 : : return NULL;
12364 : 0 : indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
12365 : : actions, user_data,
12366 : : error);
12367 [ # # ]: 0 : if (!indlst_obj->handle) {
12368 : 0 : mlx5_free(indlst_obj);
12369 : 0 : return NULL;
12370 : : }
12371 : 0 : indlst_obj->legacy_type = actions[0].type;
12372 : 0 : indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
12373 [ # # ]: 0 : mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
12374 : 0 : return (struct rte_flow_action_list_handle *)indlst_obj;
12375 : : }
12376 : :
12377 : : static __rte_always_inline enum mlx5_indirect_list_type
12378 : : flow_hw_inlist_type_get(const struct rte_flow_action *actions)
12379 : : {
12380 [ # # # # ]: 0 : switch (actions[0].type) {
12381 : : case RTE_FLOW_ACTION_TYPE_SAMPLE:
12382 : : return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
12383 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
12384 : 0 : return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
12385 : 0 : MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
12386 : : MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
12387 : : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
12388 : : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
12389 : : return MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
12390 : : default:
12391 : : break;
12392 : : }
12393 : : return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
12394 : : }
12395 : :
12396 : : static struct rte_flow_action_list_handle*
12397 : 0 : mlx5_hw_decap_encap_handle_create(struct rte_eth_dev *dev,
12398 : : const struct mlx5_flow_template_table_cfg *table_cfg,
12399 : : const struct rte_flow_action *actions,
12400 : : struct rte_flow_error *error)
12401 : : {
12402 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12403 : : const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
12404 : : const struct rte_flow_action *encap = NULL;
12405 : : const struct rte_flow_action *decap = NULL;
12406 : 0 : struct rte_flow_indir_action_conf indirect_conf = {
12407 : 0 : .ingress = flow_attr->ingress,
12408 : 0 : .egress = flow_attr->egress,
12409 : 0 : .transfer = flow_attr->transfer,
12410 : : };
12411 : : struct mlx5_hw_encap_decap_action *handle;
12412 : : uint64_t action_flags = 0;
12413 : :
12414 : : /*
12415 : : * Allow
12416 : : * 1. raw_decap / raw_encap / end
12417 : : * 2. raw_encap / end
12418 : : * 3. raw_decap / end
12419 : : */
12420 [ # # ]: 0 : while (actions->type != RTE_FLOW_ACTION_TYPE_END) {
12421 [ # # ]: 0 : if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
12422 [ # # ]: 0 : if (action_flags) {
12423 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12424 : : actions, "Invalid indirect action list sequence");
12425 : 0 : return NULL;
12426 : : }
12427 : : action_flags |= MLX5_FLOW_ACTION_DECAP;
12428 : : decap = actions;
12429 [ # # ]: 0 : } else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
12430 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_ENCAP) {
12431 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12432 : : actions, "Invalid indirect action list sequence");
12433 : 0 : return NULL;
12434 : : }
12435 : 0 : action_flags |= MLX5_FLOW_ACTION_ENCAP;
12436 : : encap = actions;
12437 : : } else {
12438 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12439 : : actions, "Invalid indirect action type in list");
12440 : 0 : return NULL;
12441 : : }
12442 : 0 : actions++;
12443 : : }
12444 [ # # ]: 0 : if (!decap && !encap) {
12445 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12446 : : actions, "Invalid indirect action combinations");
12447 : 0 : return NULL;
12448 : : }
12449 : 0 : handle = mlx5_reformat_action_create(dev, &indirect_conf, encap, decap, error);
12450 [ # # ]: 0 : if (!handle) {
12451 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12452 : : actions, "Failed to create HWS decap_encap action");
12453 : 0 : return NULL;
12454 : : }
12455 : 0 : handle->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
12456 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->indirect_list_head, &handle->indirect, entry);
12457 : 0 : return (struct rte_flow_action_list_handle *)handle;
12458 : : }
12459 : :
12460 : : static struct rte_flow_action_list_handle *
12461 [ # # ]: 0 : flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
12462 : : const struct rte_flow_op_attr *attr,
12463 : : const struct rte_flow_indir_action_conf *conf,
12464 : : const struct rte_flow_action *actions,
12465 : : void *user_data,
12466 : : struct rte_flow_error *error)
12467 : : {
12468 : : struct mlx5_hw_q_job *job = NULL;
12469 : : bool push = flow_hw_action_push(attr);
12470 : : enum mlx5_indirect_list_type list_type;
12471 : : struct rte_flow_action_list_handle *handle;
12472 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12473 : 0 : const struct mlx5_flow_template_table_cfg table_cfg = {
12474 : : .external = true,
12475 : : .attr = {
12476 : : .flow_attr = {
12477 : 0 : .ingress = conf->ingress,
12478 : 0 : .egress = conf->egress,
12479 : 0 : .transfer = conf->transfer
12480 : : }
12481 : : }
12482 : : };
12483 : :
12484 [ # # ]: 0 : if (!actions) {
12485 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12486 : : NULL, "No action list");
12487 : 0 : return NULL;
12488 : : }
12489 : : list_type = flow_hw_inlist_type_get(actions);
12490 [ # # ]: 0 : if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
12491 : : /*
12492 : : * Legacy indirect actions already have
12493 : : * async resources management. No need to do it twice.
12494 : : */
12495 : 0 : handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
12496 : : actions, user_data, error);
12497 : 0 : goto end;
12498 : : }
12499 [ # # ]: 0 : if (attr) {
12500 : : job = flow_hw_action_job_init(priv, queue, NULL, user_data,
12501 : : NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
12502 : : MLX5_HW_INDIRECT_TYPE_LIST, error);
12503 : : if (!job)
12504 : 0 : return NULL;
12505 : : }
12506 [ # # # ]: 0 : switch (list_type) {
12507 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
12508 : 0 : handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
12509 : : actions, error);
12510 : 0 : break;
12511 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
12512 : 0 : handle = mlx5_hw_decap_encap_handle_create(dev, &table_cfg,
12513 : : actions, error);
12514 : 0 : break;
12515 : 0 : default:
12516 : : handle = NULL;
12517 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12518 : : actions, "Invalid list");
12519 : : }
12520 [ # # ]: 0 : if (job) {
12521 : 0 : job->action = handle;
12522 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, false,
12523 : : handle != NULL);
12524 : : }
12525 : 0 : end:
12526 : : return handle;
12527 : : }
12528 : :
12529 : : static struct rte_flow_action_list_handle *
12530 : 0 : flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
12531 : : const struct rte_flow_indir_action_conf *conf,
12532 : : const struct rte_flow_action *actions,
12533 : : struct rte_flow_error *error)
12534 : : {
12535 : 0 : return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
12536 : : NULL, conf, actions,
12537 : : NULL, error);
12538 : : }
12539 : :
12540 : : static int
12541 [ # # ]: 0 : flow_hw_async_action_list_handle_destroy
12542 : : (struct rte_eth_dev *dev, uint32_t queue,
12543 : : const struct rte_flow_op_attr *attr,
12544 : : struct rte_flow_action_list_handle *handle,
12545 : : void *user_data, struct rte_flow_error *error)
12546 : : {
12547 : : int ret = 0;
12548 : : struct mlx5_hw_q_job *job = NULL;
12549 : : bool push = flow_hw_action_push(attr);
12550 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
12551 : : enum mlx5_indirect_list_type type =
12552 : : mlx5_get_indirect_list_type((void *)handle);
12553 : :
12554 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
12555 : : struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
12556 : :
12557 : 0 : ret = flow_hw_action_handle_destroy(dev, queue, attr,
12558 : : legacy->handle,
12559 : : user_data, error);
12560 : : mlx5_indirect_list_remove_entry(&legacy->indirect);
12561 : 0 : goto end;
12562 : : }
12563 [ # # ]: 0 : if (attr) {
12564 : : job = flow_hw_action_job_init(priv, queue, NULL, user_data,
12565 : : NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
12566 : : MLX5_HW_INDIRECT_TYPE_LIST, error);
12567 : : if (!job)
12568 : 0 : return rte_errno;
12569 : : }
12570 [ # # # ]: 0 : switch (type) {
12571 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
12572 : 0 : mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
12573 : 0 : break;
12574 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
12575 [ # # ]: 0 : LIST_REMOVE(&((struct mlx5_hw_encap_decap_action *)handle)->indirect,
12576 : : entry);
12577 : 0 : mlx5_reformat_action_destroy(dev, handle, error);
12578 : 0 : break;
12579 : 0 : default:
12580 : 0 : ret = rte_flow_error_set(error, EINVAL,
12581 : : RTE_FLOW_ERROR_TYPE_ACTION,
12582 : : NULL, "Invalid indirect list handle");
12583 : : }
12584 [ # # ]: 0 : if (job) {
12585 : : flow_hw_action_finalize(dev, queue, job, push, false, true);
12586 : : }
12587 : 0 : end:
12588 : : return ret;
12589 : : }
12590 : :
12591 : : static int
12592 : 0 : flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
12593 : : struct rte_flow_action_list_handle *handle,
12594 : : struct rte_flow_error *error)
12595 : : {
12596 : 0 : return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
12597 : : NULL, handle, NULL,
12598 : : error);
12599 : : }
12600 : :
12601 : : static int
12602 [ # # ]: 0 : flow_hw_async_action_list_handle_query_update
12603 : : (struct rte_eth_dev *dev, uint32_t queue_id,
12604 : : const struct rte_flow_op_attr *attr,
12605 : : const struct rte_flow_action_list_handle *handle,
12606 : : const void **update, void **query,
12607 : : enum rte_flow_query_update_mode mode,
12608 : : void *user_data, struct rte_flow_error *error)
12609 : : {
12610 : : enum mlx5_indirect_list_type type =
12611 : : mlx5_get_indirect_list_type((const void *)handle);
12612 : :
12613 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
12614 : : struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
12615 : :
12616 [ # # ]: 0 : if (update && query)
12617 : 0 : return flow_hw_async_action_handle_query_update
12618 : : (dev, queue_id, attr, legacy->handle,
12619 : : update, query, mode, user_data, error);
12620 [ # # # # ]: 0 : else if (update && update[0])
12621 : 0 : return flow_hw_action_handle_update(dev, queue_id, attr,
12622 : : legacy->handle, update[0],
12623 : : user_data, error);
12624 [ # # # # ]: 0 : else if (query && query[0])
12625 : 0 : return flow_hw_action_handle_query(dev, queue_id, attr,
12626 : 0 : legacy->handle, query[0],
12627 : : user_data, error);
12628 : : else
12629 : 0 : return rte_flow_error_set(error, EINVAL,
12630 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12631 : : NULL, "invalid legacy handle query_update parameters");
12632 : : }
12633 : : return -ENOTSUP;
12634 : : }
12635 : :
12636 : : static int
12637 : 0 : flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
12638 : : const struct rte_flow_action_list_handle *handle,
12639 : : const void **update, void **query,
12640 : : enum rte_flow_query_update_mode mode,
12641 : : struct rte_flow_error *error)
12642 : : {
12643 : 0 : return flow_hw_async_action_list_handle_query_update
12644 : : (dev, MLX5_HW_INV_QUEUE, NULL, handle,
12645 : : update, query, mode, NULL, error);
12646 : : }
12647 : :
12648 : : static int
12649 : 0 : flow_hw_calc_table_hash(struct rte_eth_dev *dev,
12650 : : const struct rte_flow_template_table *table,
12651 : : const struct rte_flow_item pattern[],
12652 : : uint8_t pattern_template_index,
12653 : : uint32_t *hash, struct rte_flow_error *error)
12654 : : {
12655 : : const struct rte_flow_item *items;
12656 : : struct mlx5_flow_hw_pattern_params pp;
12657 : : int res;
12658 : :
12659 : 0 : items = flow_hw_get_rule_items(dev, table, pattern,
12660 : : pattern_template_index,
12661 : : &pp);
12662 : 0 : res = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,
12663 : : pattern_template_index,
12664 : : MLX5DR_RULE_HASH_CALC_MODE_RAW,
12665 : : hash);
12666 [ # # ]: 0 : if (res)
12667 : 0 : return rte_flow_error_set(error, res,
12668 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12669 : : NULL,
12670 : : "hash could not be calculated");
12671 : : return 0;
12672 : : }
12673 : :
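      : : /*
      : :  * The matching public entry point is rte_flow_calc_table_hash(). A
      : :  * sketch, assuming table was created with a pattern template at
      : :  * index 0 and pattern matches that template:
      : :  *
      : :  * @code{.c}
      : :  * uint32_t hash;
      : :  * struct rte_flow_error error;
      : :  *
      : :  * rte_flow_calc_table_hash(port_id, table, pattern, 0, &hash, &error);
      : :  * @endcode
      : :  */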
12674 : : static int
12675 : 0 : flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
12676 : : const struct rte_flow_item pattern[],
12677 : : enum rte_flow_encap_hash_field dest_field,
12678 : : uint8_t *hash,
12679 : : struct rte_flow_error *error)
12680 : : {
12681 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12682 : : struct mlx5dr_crc_encap_entropy_hash_fields data;
12683 : 0 : enum mlx5dr_crc_encap_entropy_hash_size res_size =
12684 : : dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT ?
12685 : 0 : MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_16 :
12686 : : MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_8;
12687 : : int res;
12688 : :
12689 : : memset(&data, 0, sizeof(struct mlx5dr_crc_encap_entropy_hash_fields));
12690 : :
12691 [ # # ]: 0 : for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
12692 [ # # # # : 0 : switch (pattern->type) {
# # # ]
12693 : 0 : case RTE_FLOW_ITEM_TYPE_IPV4:
12694 : 0 : data.dst.ipv4_addr =
12695 : 0 : ((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.dst_addr;
12696 : 0 : data.src.ipv4_addr =
12697 : 0 : ((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.src_addr;
12698 : 0 : break;
12699 : 0 : case RTE_FLOW_ITEM_TYPE_IPV6:
12700 : : memcpy(data.dst.ipv6_addr,
12701 : 0 : ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
12702 : : sizeof(data.dst.ipv6_addr));
12703 : : memcpy(data.src.ipv6_addr,
12704 : : ((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
12705 : : sizeof(data.src.ipv6_addr));
12706 : : break;
12707 : 0 : case RTE_FLOW_ITEM_TYPE_UDP:
12708 : 0 : data.next_protocol = IPPROTO_UDP;
12709 : 0 : data.dst_port =
12710 : 0 : ((const struct rte_flow_item_udp *)(pattern->spec))->hdr.dst_port;
12711 : 0 : data.src_port =
12712 : 0 : ((const struct rte_flow_item_udp *)(pattern->spec))->hdr.src_port;
12713 : 0 : break;
12714 : 0 : case RTE_FLOW_ITEM_TYPE_TCP:
12715 : 0 : data.next_protocol = IPPROTO_TCP;
12716 : 0 : data.dst_port =
12717 : 0 : ((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.dst_port;
12718 : 0 : data.src_port =
12719 : 0 : ((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.src_port;
12720 : 0 : break;
12721 : 0 : case RTE_FLOW_ITEM_TYPE_ICMP:
12722 : 0 : data.next_protocol = IPPROTO_ICMP;
12723 : 0 : break;
12724 : 0 : case RTE_FLOW_ITEM_TYPE_ICMP6:
12725 : 0 : data.next_protocol = IPPROTO_ICMPV6;
12726 : 0 : break;
12727 : : default:
12728 : : break;
12729 : : }
12730 : : }
12731 : 0 : res = mlx5dr_crc_encap_entropy_hash_calc(priv->dr_ctx, &data, hash, res_size);
12732 [ # # ]: 0 : if (res)
12733 : 0 : return rte_flow_error_set(error, res,
12734 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12735 : : NULL, "error while calculating encap hash");
12736 : : return 0;
12737 : : }
12738 : :
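      : : /*
      : :  * The public counterpart is rte_flow_calc_encap_hash(); the result
      : :  * width follows the destination field (2 bytes for a source port,
      : :  * 1 byte otherwise). A sketch, with pattern describing the inner
      : :  * headers to be encapsulated:
      : :  *
      : :  * @code{.c}
      : :  * uint8_t hash[2];
      : :  * struct rte_flow_error error;
      : :  *
      : :  * rte_flow_calc_encap_hash(port_id, pattern,
      : :  *                          RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT,
      : :  *                          sizeof(hash), hash, &error);
      : :  * @endcode
      : :  */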
12739 : : static int
12740 : 0 : flow_hw_table_resize_multi_pattern_actions(struct rte_eth_dev *dev,
12741 : : struct rte_flow_template_table *table,
12742 : : uint32_t nb_flows,
12743 : : struct rte_flow_error *error)
12744 : : {
12745 : 0 : struct mlx5_multi_pattern_segment *segment = table->mpctx.segments;
12746 : : uint32_t bulk_size;
12747 : : int i, ret;
12748 : :
12749 : : /**
12750 : :  * A segment always allocates Modify Header Argument objects in
12751 : :  * powers of 2.
12752 : :  * On resize, the PMD adds only the minimal required number of
12753 : :  * argument objects. For example, a table sized for 10 rules got 16
12754 : :  * argument objects, so resizing it to 15 adds no new objects.
12755 : : */
12756 : 0 : for (i = 1;
12757 [ # # # # ]: 0 : i < MLX5_MAX_TABLE_RESIZE_NUM && segment->capacity;
12758 : 0 : i++, segment++) {
12759 : : /* keep the devtools/checkpatches.sh happy */
12760 : : }
12761 [ # # ]: 0 : if (i == MLX5_MAX_TABLE_RESIZE_NUM)
12762 : 0 : return rte_flow_error_set(error, EINVAL,
12763 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12764 : : table, "too many resizes");
12765 [ # # ]: 0 : if (segment->head_index - 1 >= nb_flows)
12766 : : return 0;
12767 [ # # ]: 0 : bulk_size = rte_align32pow2(nb_flows - segment->head_index + 1);
12768 : 0 : ret = mlx5_tbl_multi_pattern_process(dev, table, segment,
12769 : : rte_log2_u32(bulk_size),
12770 : : error);
12771 [ # # ]: 0 : if (ret)
12772 : 0 : return rte_flow_error_set(error, EINVAL,
12773 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12774 : : table, "too many resizes");
12775 : : return i;
12776 : : }
12777 : :
12778 : : static int
12779 : 0 : flow_hw_table_resize(struct rte_eth_dev *dev,
12780 : : struct rte_flow_template_table *table,
12781 : : uint32_t nb_flows,
12782 : : struct rte_flow_error *error)
12783 : : {
12784 : : struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
12785 : : struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
12786 : 0 : struct mlx5dr_matcher_attr matcher_attr = table->matcher_attr;
12787 : : struct mlx5_multi_pattern_segment *segment = NULL;
12788 : : struct mlx5dr_matcher *matcher = NULL;
12789 : 0 : uint32_t i, selector = table->matcher_selector;
12790 : 0 : uint32_t other_selector = (selector + 1) & 1;
12791 : : int ret;
12792 : :
12793 [ # # ]: 0 : if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
12794 : 0 : return rte_flow_error_set(error, EINVAL,
12795 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12796 : : table, "no resizable attribute");
12797 [ # # ]: 0 : if (table->matcher_info[other_selector].matcher)
12798 : 0 : return rte_flow_error_set(error, EINVAL,
12799 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12800 : : table, "last table resize was not completed");
12801 [ # # ]: 0 : if (nb_flows <= table->cfg.attr.nb_flows)
12802 : 0 : return rte_flow_error_set(error, EINVAL,
12803 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12804 : : table, "shrinking table is not supported");
12805 : 0 : ret = mlx5_ipool_resize(table->flow, nb_flows);
12806 [ # # ]: 0 : if (ret)
12807 : 0 : return rte_flow_error_set(error, EINVAL,
12808 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12809 : : table, "cannot resize flows pool");
12810 : : /*
12811 : : * A resizable matcher doesn't support rule update. In this case, the ipool
12812 : : * for the resource is not created and there is no need to resize it.
12813 : : */
12814 : : MLX5_ASSERT(!table->resource);
12815 [ # # ]: 0 : if (mlx5_is_multi_pattern_active(&table->mpctx)) {
12816 : 0 : ret = flow_hw_table_resize_multi_pattern_actions(dev, table, nb_flows, error);
12817 [ # # ]: 0 : if (ret < 0)
12818 : : return ret;
12819 [ # # ]: 0 : if (ret > 0)
12820 : 0 : segment = table->mpctx.segments + ret;
12821 : : }
12822 [ # # ]: 0 : for (i = 0; i < table->nb_item_templates; i++)
12823 : 0 : mt[i] = table->its[i]->mt;
12824 [ # # ]: 0 : for (i = 0; i < table->nb_action_templates; i++)
12825 : 0 : at[i] = table->ats[i].action_template->tmpl;
12826 : : nb_flows = rte_align32pow2(nb_flows);
12827 : 0 : matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
12828 : 0 : matcher = mlx5dr_matcher_create(table->grp->tbl, mt,
12829 : : table->nb_item_templates, at,
12830 : : table->nb_action_templates,
12831 : : &matcher_attr);
12832 [ # # ]: 0 : if (!matcher) {
12833 : 0 : ret = rte_flow_error_set(error, rte_errno,
12834 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12835 : : table, "failed to create new matcher");
12836 : 0 : goto error;
12837 : : }
12838 : 0 : rte_rwlock_write_lock(&table->matcher_replace_rwlk);
12839 : 0 : ret = mlx5dr_matcher_resize_set_target
12840 : : (table->matcher_info[selector].matcher, matcher);
12841 [ # # ]: 0 : if (ret) {
12842 : : rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
12843 : 0 : ret = rte_flow_error_set(error, rte_errno,
12844 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12845 : : table, "failed to initiate matcher swap");
12846 : 0 : goto error;
12847 : : }
12848 : 0 : table->cfg.attr.nb_flows = nb_flows;
12849 : 0 : table->matcher_info[other_selector].matcher = matcher;
12850 : 0 : table->matcher_selector = other_selector;
12851 : 0 : rte_atomic_store_explicit(&table->matcher_info[other_selector].refcnt,
12852 : : 0, rte_memory_order_relaxed);
12853 : : rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
12854 : 0 : return 0;
12855 : 0 : error:
12856 [ # # ]: 0 : if (segment)
12857 : 0 : mlx5_destroy_multi_pattern_segment(segment);
12858 [ # # ]: 0 : if (matcher) {
12859 : 0 : ret = mlx5dr_matcher_destroy(matcher);
12860 : 0 : return rte_flow_error_set(error, rte_errno,
12861 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12862 : : table, "failed to destroy new matcher");
12863 : : }
12864 : : return ret;
12865 : : }
12866 : :
12867 : : static int
12868 : 0 : flow_hw_table_resize_complete(__rte_unused struct rte_eth_dev *dev,
12869 : : struct rte_flow_template_table *table,
12870 : : struct rte_flow_error *error)
12871 : : {
12872 : : int ret;
12873 : 0 : uint32_t selector = table->matcher_selector;
12874 : 0 : uint32_t other_selector = (selector + 1) & 1;
12875 : : struct mlx5_matcher_info *matcher_info = &table->matcher_info[other_selector];
12876 : : uint32_t matcher_refcnt;
12877 : :
12878 [ # # ]: 0 : if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
12879 : 0 : return rte_flow_error_set(error, EINVAL,
12880 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12881 : : table, "no resizable attribute");
12882 : 0 : matcher_refcnt = rte_atomic_load_explicit(&matcher_info->refcnt,
12883 : : rte_memory_order_relaxed);
12884 [ # # # # ]: 0 : if (!matcher_info->matcher || matcher_refcnt)
12885 : 0 : return rte_flow_error_set(error, EBUSY,
12886 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12887 : : table, "cannot complete table resize");
12888 : 0 : ret = mlx5dr_matcher_destroy(matcher_info->matcher);
12889 [ # # ]: 0 : if (ret)
12890 : 0 : return rte_flow_error_set(error, rte_errno,
12891 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12892 : : table, "failed to destroy retired matcher");
12893 : 0 : matcher_info->matcher = NULL;
12894 : 0 : return 0;
12895 : : }
12896 : :
12897 : : static int
12898 : 0 : flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,
12899 : : const struct rte_flow_op_attr *attr,
12900 : : struct rte_flow *flow, void *user_data,
12901 : : struct rte_flow_error *error)
12902 : : {
12903 : : int ret;
12904 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12905 : : struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
12906 : 0 : struct rte_flow_template_table *table = hw_flow->table;
12907 : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
12908 : 0 : uint32_t table_selector = table->matcher_selector;
12909 : 0 : uint32_t rule_selector = aux->matcher_selector;
12910 : : uint32_t other_selector;
12911 : : struct mlx5dr_matcher *other_matcher;
12912 : 0 : struct mlx5dr_rule_attr rule_attr = {
12913 : : .queue_id = queue,
12914 : 0 : .burst = attr->postpone,
12915 : : };
12916 : :
12917 : : MLX5_ASSERT(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
12918 : : /**
12919 : :  * mlx5dr_matcher_resize_rule_move() accepts the original table matcher,
12920 : : * the one that was used BEFORE table resize.
12921 : : * Since the function is called AFTER table resize,
12922 : : * `table->matcher_selector` always points to the new matcher and
12923 : : * `aux->matcher_selector` points to a matcher used to create the flow.
12924 : : */
12925 : : other_selector = rule_selector == table_selector ?
12926 [ # # ]: 0 : (rule_selector + 1) & 1 : rule_selector;
12927 : 0 : other_matcher = table->matcher_info[other_selector].matcher;
12928 [ # # ]: 0 : if (!other_matcher)
12929 : 0 : return rte_flow_error_set(error, EINVAL,
12930 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12931 : : "no active table resize");
12932 : 0 : hw_flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE;
12933 : 0 : hw_flow->user_data = user_data;
12934 : 0 : rule_attr.user_data = hw_flow;
12935 [ # # ]: 0 : if (rule_selector == table_selector) {
12936 : 0 : struct rte_ring *ring = !attr->postpone ?
12937 [ # # ]: 0 : priv->hw_q[queue].flow_transfer_completed :
12938 : 0 : priv->hw_q[queue].flow_transfer_pending;
12939 : 0 : rte_ring_enqueue(ring, hw_flow);
12940 : : flow_hw_q_inc_flow_ops(priv, queue);
12941 : 0 : return 0;
12942 : : }
12943 : 0 : ret = mlx5dr_matcher_resize_rule_move(other_matcher,
12944 : 0 : (struct mlx5dr_rule *)hw_flow->rule,
12945 : : &rule_attr);
12946 [ # # ]: 0 : if (ret) {
12947 : 0 : return rte_flow_error_set(error, rte_errno,
12948 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12949 : : "flow transfer failed");
12950 : : }
12951 : : flow_hw_q_inc_flow_ops(priv, queue);
12952 : 0 : return 0;
12953 : : }
12954 : :
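      : : /*
      : :  * The resize handlers above implement the non-blocking template table
      : :  * resize sequence exposed through rte_flow: grow the table, move every
      : :  * pre-existing rule to the new matcher, then retire the old one. A
      : :  * sketch with queue draining and error handling trimmed:
      : :  *
      : :  * @code{.c}
      : :  * rte_flow_template_table_resize(port_id, table, new_nb_flows, &error);
      : :  * // for each rule created before the resize:
      : :  * rte_flow_async_update_resized(port_id, queue, &op_attr, rule,
      : :  *                               user_data, &error);
      : :  * // after all move completions were pulled from the queue:
      : :  * rte_flow_template_table_resize_complete(port_id, table, &error);
      : :  * @endcode
      : :  */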
12955 : : const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
12956 : : .info_get = flow_hw_info_get,
12957 : : .configure = flow_hw_configure,
12958 : : .pattern_validate = flow_hw_pattern_validate,
12959 : : .pattern_template_create = flow_hw_pattern_template_create,
12960 : : .pattern_template_destroy = flow_hw_pattern_template_destroy,
12961 : : .actions_validate = flow_hw_actions_validate,
12962 : : .actions_template_create = flow_hw_actions_template_create,
12963 : : .actions_template_destroy = flow_hw_actions_template_destroy,
12964 : : .template_table_create = flow_hw_template_table_create,
12965 : : .template_table_destroy = flow_hw_table_destroy,
12966 : : .table_resize = flow_hw_table_resize,
12967 : : .group_set_miss_actions = flow_hw_group_set_miss_actions,
12968 : : .async_flow_create = flow_hw_async_flow_create,
12969 : : .async_flow_create_by_index = flow_hw_async_flow_create_by_index,
12970 : : .async_flow_update = flow_hw_async_flow_update,
12971 : : .async_flow_destroy = flow_hw_async_flow_destroy,
12972 : : .flow_update_resized = flow_hw_update_resized,
12973 : : .table_resize_complete = flow_hw_table_resize_complete,
12974 : : .pull = flow_hw_pull,
12975 : : .push = flow_hw_push,
12976 : : .async_action_create = flow_hw_action_handle_create,
12977 : : .async_action_destroy = flow_hw_action_handle_destroy,
12978 : : .async_action_update = flow_hw_action_handle_update,
12979 : : .async_action_query_update = flow_hw_async_action_handle_query_update,
12980 : : .async_action_query = flow_hw_action_handle_query,
12981 : : .action_validate = flow_hw_action_validate,
12982 : : .action_create = flow_hw_action_create,
12983 : : .action_destroy = flow_hw_action_destroy,
12984 : : .action_update = flow_hw_action_update,
12985 : : .action_query = flow_hw_action_query,
12986 : : .action_query_update = flow_hw_action_query_update,
12987 : : .action_list_handle_create = flow_hw_action_list_handle_create,
12988 : : .action_list_handle_destroy = flow_hw_action_list_handle_destroy,
12989 : : .action_list_handle_query_update =
12990 : : flow_hw_action_list_handle_query_update,
12991 : : .async_action_list_handle_create =
12992 : : flow_hw_async_action_list_handle_create,
12993 : : .async_action_list_handle_destroy =
12994 : : flow_hw_async_action_list_handle_destroy,
12995 : : .async_action_list_handle_query_update =
12996 : : flow_hw_async_action_list_handle_query_update,
12997 : : .query = flow_hw_query,
12998 : : .get_aged_flows = flow_hw_get_aged_flows,
12999 : : .get_q_aged_flows = flow_hw_get_q_aged_flows,
13000 : : .item_create = flow_dv_item_create,
13001 : : .item_release = flow_dv_item_release,
13002 : : .flow_calc_table_hash = flow_hw_calc_table_hash,
13003 : : .flow_calc_encap_hash = flow_hw_calc_encap_hash,
13004 : : };
13005 : :
13006 : : /**
13007 : : * Creates a control flow using the flow template API on the @p proxy_dev device,
13008 : : * on behalf of the @p owner_dev device.
13009 : : *
13010 : : * This function uses locks internally to synchronize access to the
13011 : : * flow queue.
13012 : : *
13013 : : * The created flow is stored in a private list associated with the @p proxy_dev device.
13014 : : *
13015 : : * @param owner_dev
13016 : : * Pointer to Ethernet device on behalf of which flow is created.
13017 : : * @param proxy_dev
13018 : : * Pointer to Ethernet device on which flow is created.
13019 : : * @param table
13020 : : * Pointer to flow table.
13021 : : * @param items
13022 : : * Pointer to flow rule items.
13023 : : * @param item_template_idx
13024 : : * Index of an item template associated with @p table.
13025 : : * @param actions
13026 : : * Pointer to flow rule actions.
13027 : : * @param action_template_idx
13028 : : * Index of an action template associated with @p table.
13029 : : * @param info
13030 : : * Additional info about control flow rule.
13031 : : * @param external
13032 : : * True if the control flow rule is external; such rules are stored on the external control flows list.
13033 : : *
13034 : : * @return
13035 : : * 0 on success, negative errno value otherwise and rte_errno set.
13036 : : */
13037 : : static __rte_unused int
13038 : 0 : flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
13039 : : struct rte_eth_dev *proxy_dev,
13040 : : struct rte_flow_template_table *table,
13041 : : struct rte_flow_item items[],
13042 : : uint8_t item_template_idx,
13043 : : struct rte_flow_action actions[],
13044 : : uint8_t action_template_idx,
13045 : : struct mlx5_hw_ctrl_flow_info *info,
13046 : : bool external)
13047 : : {
13048 : 0 : struct mlx5_priv *priv = proxy_dev->data->dev_private;
13049 : 0 : uint32_t queue = CTRL_QUEUE_ID(priv);
13050 : 0 : struct rte_flow_op_attr op_attr = {
13051 : : .postpone = 0,
13052 : : };
13053 : : struct rte_flow *flow = NULL;
13054 : : struct mlx5_hw_ctrl_flow *entry = NULL;
13055 : : int ret;
13056 : :
13057 : 0 : rte_spinlock_lock(&priv->hw_ctrl_lock);
13058 : 0 : entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
13059 : : 0, SOCKET_ID_ANY);
13060 [ # # ]: 0 : if (!entry) {
13061 : 0 : DRV_LOG(ERR, "port %u not enough memory to create control flows",
13062 : : proxy_dev->data->port_id);
13063 : 0 : rte_errno = ENOMEM;
13064 : : ret = -rte_errno;
13065 : 0 : goto error;
13066 : : }
13067 : 0 : flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
13068 : : items, item_template_idx,
13069 : : actions, action_template_idx,
13070 : : NULL, NULL);
13071 [ # # ]: 0 : if (!flow) {
13072 : 0 : DRV_LOG(ERR, "port %u failed to enqueue create control"
13073 : : " flow operation", proxy_dev->data->port_id);
13074 : 0 : ret = -rte_errno;
13075 : 0 : goto error;
13076 : : }
13077 : 0 : ret = __flow_hw_pull_comp(proxy_dev, queue, NULL);
13078 [ # # ]: 0 : if (ret) {
13079 : 0 : DRV_LOG(ERR, "port %u failed to insert control flow",
13080 : : proxy_dev->data->port_id);
13081 : 0 : rte_errno = EINVAL;
13082 : : ret = -rte_errno;
13083 : 0 : goto error;
13084 : : }
13085 : 0 : entry->owner_dev = owner_dev;
13086 : 0 : entry->flow = flow;
13087 [ # # ]: 0 : if (info)
13088 : 0 : entry->info = *info;
13089 : : else
13090 : 0 : entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL;
13091 [ # # ]: 0 : if (external)
13092 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
13093 : : else
13094 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
13095 : : rte_spinlock_unlock(&priv->hw_ctrl_lock);
13096 : 0 : return 0;
13097 : 0 : error:
13098 [ # # ]: 0 : if (entry)
13099 : 0 : mlx5_free(entry);
13100 : : rte_spinlock_unlock(&priv->hw_ctrl_lock);
13101 : 0 : return ret;
13102 : : }
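flow_hw_create_ctrl_flow() above turns the asynchronous flow API into a synchronous one: it enqueues a non-postponed create operation, then polls the queue until the completion arrives. A minimal sketch of the same pattern against the public rte_flow API, assuming the queue, table, and template index 0 are already configured:

#include <errno.h>
#include <rte_errno.h>
#include <rte_flow.h>

static int
create_flow_sync(uint16_t port_id, uint32_t queue,
		 struct rte_flow_template_table *tbl,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow **out)
{
	const struct rte_flow_op_attr attr = { .postpone = 0 };
	struct rte_flow_op_result res;
	struct rte_flow_error err;
	int n;

	*out = rte_flow_async_create(port_id, queue, &attr, tbl,
				     items, 0, actions, 0, NULL, &err);
	if (*out == NULL)
		return -rte_errno;
	/* Poll until the single enqueued operation completes. */
	do {
		n = rte_flow_pull(port_id, queue, &res, 1, &err);
	} while (n == 0);
	if (n < 0 || res.status != RTE_FLOW_OP_SUCCESS)
		return -EINVAL;
	return 0;
}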
13103 : :
13104 : : /**
13105 : : * Destroys the control flow @p flow using the flow template API on the @p dev device.
13106 : : *
13107 : : * This function uses locks internally to synchronize access to the
13108 : : * flow queue.
13109 : : *
13110 : : * If @p flow is stored on any private list or pool, the caller must release
13111 : : * the related resources.
13112 : : *
13113 : : * @param dev
13114 : : * Pointer to Ethernet device.
13115 : : * @param flow
13116 : : * Pointer to flow rule.
13117 : : *
13118 : : * @return
13119 : : * 0 on success, non-zero value otherwise.
13120 : : */
13121 : : static int
13122 : 0 : flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
13123 : : {
13124 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13125 : 0 : uint32_t queue = CTRL_QUEUE_ID(priv);
13126 : 0 : struct rte_flow_op_attr op_attr = {
13127 : : .postpone = 0,
13128 : : };
13129 : : int ret;
13130 : :
13131 : 0 : rte_spinlock_lock(&priv->hw_ctrl_lock);
13132 : 0 : ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
13133 [ # # ]: 0 : if (ret) {
13134 : 0 : DRV_LOG(ERR, "port %u failed to enqueue destroy control"
13135 : : " flow operation", dev->data->port_id);
13136 : 0 : goto exit;
13137 : : }
13138 : 0 : ret = __flow_hw_pull_comp(dev, queue, NULL);
13139 [ # # ]: 0 : if (ret) {
13140 : 0 : DRV_LOG(ERR, "port %u failed to destroy control flow",
13141 : : dev->data->port_id);
13142 : 0 : rte_errno = EINVAL;
13143 : : ret = -rte_errno;
13144 : 0 : goto exit;
13145 : : }
13146 : 0 : exit:
13147 : : rte_spinlock_unlock(&priv->hw_ctrl_lock);
13148 : 0 : return ret;
13149 : : }
13150 : :
13151 : : /**
13152 : : * Destroys control flows created on behalf of @p owner device on @p dev device.
13153 : : *
13154 : : * @param dev
13155 : : * Pointer to Ethernet device on which control flows were created.
13156 : : * @param owner
13157 : : * Pointer to Ethernet device owning control flows.
13158 : : *
13159 : : * @return
13160 : : * 0 on success, otherwise negative error code is returned and
13161 : : * rte_errno is set.
13162 : : */
13163 : : static int
13164 : 0 : flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
13165 : : {
13166 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13167 : : struct mlx5_hw_ctrl_flow *cf;
13168 : : struct mlx5_hw_ctrl_flow *cf_next;
13169 : : int ret;
13170 : :
13171 : 0 : cf = LIST_FIRST(&priv->hw_ctrl_flows);
13172 [ # # ]: 0 : while (cf != NULL) {
13173 : 0 : cf_next = LIST_NEXT(cf, next);
13174 [ # # ]: 0 : if (cf->owner_dev == owner) {
13175 : 0 : ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
13176 [ # # ]: 0 : if (ret) {
13177 : 0 : rte_errno = ret;
13178 : 0 : return -ret;
13179 : : }
13180 [ # # ]: 0 : LIST_REMOVE(cf, next);
13181 : 0 : mlx5_free(cf);
13182 : : }
13183 : : cf = cf_next;
13184 : : }
13185 : : return 0;
13186 : : }
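The loop above uses the classic safe-removal idiom for BSD lists: the successor is fetched before the current entry may be unlinked and freed. The same idiom in isolation, as a self-contained sketch:

#include <stdlib.h>
#include <sys/queue.h>

struct node {
	LIST_ENTRY(node) next;
	int key;
};
LIST_HEAD(node_list, node);

static void
remove_matching(struct node_list *head, int key)
{
	struct node *n = LIST_FIRST(head);
	struct node *nn;

	while (n != NULL) {
		nn = LIST_NEXT(n, next);	/* fetch before unlinking */
		if (n->key == key) {
			LIST_REMOVE(n, next);
			free(n);
		}
		n = nn;
	}
}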
13187 : :
13188 : : /**
13189 : : * Destroys control flows created for @p owner_dev device.
13190 : : *
13191 : : * @param owner_dev
13192 : : * Pointer to Ethernet device owning control flows.
13193 : : *
13194 : : * @return
13195 : : * 0 on success, otherwise negative error code is returned and
13196 : : * rte_errno is set.
13197 : : */
13198 : : int
13199 : 0 : mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
13200 : : {
13201 : 0 : struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
13202 : : struct rte_eth_dev *proxy_dev;
13203 : 0 : uint16_t owner_port_id = owner_dev->data->port_id;
13204 : 0 : uint16_t proxy_port_id = owner_dev->data->port_id;
13205 : : int ret;
13206 : :
13207 : : /* Flush all flows created by this port for itself. */
13208 : 0 : ret = flow_hw_flush_ctrl_flows_owned_by(owner_dev, owner_dev);
13209 [ # # ]: 0 : if (ret)
13210 : : return ret;
13211 : : /* Flush all flows created for this port on the transfer proxy port. */
13212 [ # # ]: 0 : if (owner_priv->sh->config.dv_esw_en) {
13213 : 0 : ret = rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL);
13214 [ # # ]: 0 : if (ret == -ENODEV) {
13215 : 0 : DRV_LOG(DEBUG, "Unable to find transfer proxy port for port %u. It was "
13216 : : "probably closed. Control flows were cleared.",
13217 : : owner_port_id);
13218 : 0 : rte_errno = 0;
13219 : 0 : return 0;
13220 [ # # ]: 0 : } else if (ret) {
13221 : 0 : DRV_LOG(ERR, "Unable to find proxy port for port %u (ret = %d)",
13222 : : owner_port_id, ret);
13223 : 0 : return ret;
13224 : : }
13225 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
13226 : : } else {
13227 : : proxy_dev = owner_dev;
13228 : : }
13229 : 0 : return flow_hw_flush_ctrl_flows_owned_by(proxy_dev, owner_dev);
13230 : : }
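The lookup above follows the usual pattern for transfer flows: resolve the E-Switch proxy port first, and treat a vanished proxy (-ENODEV) as "nothing left to flush" because closing the proxy already cleared its control flows. A trimmed sketch of that resolution step using the public API:

#include <errno.h>
#include <rte_flow.h>

static int
resolve_transfer_proxy(uint16_t port_id, uint16_t *proxy_id)
{
	struct rte_flow_error err;
	int ret = rte_flow_pick_transfer_proxy(port_id, proxy_id, &err);

	if (ret == -ENODEV)
		return 0;	/* proxy already closed: nothing to do */
	return ret;
}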
13231 : :
13232 : : /**
13233 : : * Destroys all control flows created on @p dev device.
13234 : : *
13235 : : * @param dev
13236 : : * Pointer to Ethernet device.
13237 : : *
13238 : : * @return
13239 : : * 0 on success, otherwise negative error code is returned and
13240 : : * rte_errno is set.
13241 : : */
13242 : : static int
13243 : 0 : flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
13244 : : {
13245 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13246 : : struct mlx5_hw_ctrl_flow *cf;
13247 : : struct mlx5_hw_ctrl_flow *cf_next;
13248 : : int ret;
13249 : :
13250 : 0 : cf = LIST_FIRST(&priv->hw_ctrl_flows);
13251 [ # # ]: 0 : while (cf != NULL) {
13252 : 0 : cf_next = LIST_NEXT(cf, next);
13253 : 0 : ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
13254 [ # # ]: 0 : if (ret) {
13255 : 0 : rte_errno = ret;
13256 : 0 : return -ret;
13257 : : }
13258 [ # # ]: 0 : LIST_REMOVE(cf, next);
13259 : 0 : mlx5_free(cf);
13260 : : cf = cf_next;
13261 : : }
13262 : 0 : cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
13263 [ # # ]: 0 : while (cf != NULL) {
13264 : 0 : cf_next = LIST_NEXT(cf, next);
13265 : 0 : ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
13266 [ # # ]: 0 : if (ret) {
13267 : 0 : rte_errno = ret;
13268 : 0 : return -ret;
13269 : : }
13270 [ # # ]: 0 : LIST_REMOVE(cf, next);
13271 : 0 : mlx5_free(cf);
13272 : : cf = cf_next;
13273 : : }
13274 : : return 0;
13275 : : }
13276 : :
13277 : : int
13278 : 0 : mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
13279 : : {
13280 : 0 : uint16_t port_id = dev->data->port_id;
13281 : 0 : struct rte_flow_item_ethdev esw_mgr_spec = {
13282 : : .port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
13283 : : };
13284 : 0 : struct rte_flow_item_ethdev esw_mgr_mask = {
13285 : : .port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
13286 : : };
13287 : 0 : struct rte_flow_item_tag reg_c0_spec = {
13288 : : .index = (uint8_t)REG_C_0,
13289 : : .data = flow_hw_esw_mgr_regc_marker(dev),
13290 : : };
13291 : 0 : struct rte_flow_item_tag reg_c0_mask = {
13292 : : .index = 0xff,
13293 : : .data = flow_hw_esw_mgr_regc_marker_mask(dev),
13294 : : };
13295 : 0 : struct mlx5_rte_flow_item_sq sq_spec = {
13296 : : .queue = sqn,
13297 : : };
13298 : 0 : struct rte_flow_action_ethdev port = {
13299 : : .port_id = port_id,
13300 : : };
13301 : 0 : struct rte_flow_item items[3] = { { 0 } };
13302 : 0 : struct rte_flow_action actions[3] = { { 0 } };
13303 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13304 : : .type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
13305 : : .esw_mgr_sq = sqn,
13306 : : };
13307 : : struct rte_eth_dev *proxy_dev;
13308 : : struct mlx5_priv *proxy_priv;
13309 : 0 : uint16_t proxy_port_id = dev->data->port_id;
13310 : : int ret;
13311 : :
13312 : 0 : ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
13313 [ # # ]: 0 : if (ret) {
13314 : 0 : DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
13315 : : "port must be present to create default SQ miss flows.",
13316 : : port_id);
13317 : 0 : return ret;
13318 : : }
13319 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
13320 : 0 : proxy_priv = proxy_dev->data->dev_private;
13321 [ # # ]: 0 : if (!proxy_priv->dr_ctx) {
13322 : 0 : DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
13323 : : "for HWS to create default SQ miss flows. Default flows will "
13324 : : "not be created.",
13325 : : proxy_port_id, port_id);
13326 : 0 : return 0;
13327 : : }
13328 [ # # ]: 0 : if (!proxy_priv->hw_ctrl_fdb ||
13329 [ # # ]: 0 : !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
13330 [ # # ]: 0 : !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
13331 : 0 : DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
13332 : : "default flow tables were not created.",
13333 : : proxy_port_id, port_id);
13334 : 0 : rte_errno = ENOMEM;
13335 : 0 : return -rte_errno;
13336 : : }
13337 : : /*
13338 : : * Create a root SQ miss flow rule - match E-Switch Manager and SQ,
13339 : : * and jump to group 1.
13340 : : */
13341 : 0 : items[0] = (struct rte_flow_item){
13342 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
13343 : : .spec = &esw_mgr_spec,
13344 : : .mask = &esw_mgr_mask,
13345 : : };
13346 : 0 : items[1] = (struct rte_flow_item){
13347 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
13348 : : .spec = &sq_spec,
13349 : : };
13350 : 0 : items[2] = (struct rte_flow_item){
13351 : : .type = RTE_FLOW_ITEM_TYPE_END,
13352 : : };
13353 : 0 : actions[0] = (struct rte_flow_action){
13354 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
13355 : : };
13356 : 0 : actions[1] = (struct rte_flow_action){
13357 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
13358 : : };
13359 : 0 : actions[2] = (struct rte_flow_action) {
13360 : : .type = RTE_FLOW_ACTION_TYPE_END,
13361 : : };
13362 : 0 : ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
13363 : : proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl,
13364 : : items, 0, actions, 0, &flow_info, external);
13365 [ # # ]: 0 : if (ret) {
13366 : 0 : DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
13367 : : port_id, sqn, ret);
13368 : 0 : return ret;
13369 : : }
13370 : : /*
13371 : : * Create a non-root SQ miss flow rule - match REG_C_0 marker and SQ,
13372 : : * and forward to port.
13373 : : */
13374 : 0 : items[0] = (struct rte_flow_item){
13375 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
13376 : 0 : .spec = &reg_c0_spec,
13377 : 0 : .mask = &reg_c0_mask,
13378 : : };
13379 : 0 : items[1] = (struct rte_flow_item){
13380 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
13381 : : .spec = &sq_spec,
13382 : : };
13383 : 0 : items[2] = (struct rte_flow_item){
13384 : : .type = RTE_FLOW_ITEM_TYPE_END,
13385 : : };
13386 : 0 : actions[0] = (struct rte_flow_action){
13387 : : .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
13388 : : .conf = &port,
13389 : : };
13390 : 0 : actions[1] = (struct rte_flow_action){
13391 : : .type = RTE_FLOW_ACTION_TYPE_END,
13392 : : };
13393 : 0 : flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
13394 : 0 : ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
13395 : 0 : proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl,
13396 : : items, 0, actions, 0, &flow_info, external);
13397 [ # # ]: 0 : if (ret) {
13398 : 0 : DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
13399 : : port_id, sqn, ret);
13400 : 0 : return ret;
13401 : : }
13402 : : return 0;
13403 : : }
13404 : :
13405 : : static bool
13406 : : flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf,
13407 : : struct rte_eth_dev *dev,
13408 : : uint32_t sqn)
13409 : : {
13410 : 0 : if (cf->owner_dev != dev)
13411 : : return false;
13412 [ # # # # ]: 0 : if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
13413 : : return true;
13414 [ # # # # ]: 0 : if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
13415 : : return true;
13416 : : return false;
13417 : : }
13418 : :
13419 : : int
13420 : 0 : mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
13421 : : {
13422 : 0 : uint16_t port_id = dev->data->port_id;
13423 : 0 : uint16_t proxy_port_id = dev->data->port_id;
13424 : : struct rte_eth_dev *proxy_dev;
13425 : : struct mlx5_priv *proxy_priv;
13426 : : struct mlx5_hw_ctrl_flow *cf;
13427 : : struct mlx5_hw_ctrl_flow *cf_next;
13428 : : int ret;
13429 : :
13430 : 0 : ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
13431 [ # # ]: 0 : if (ret) {
13432 : 0 : DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
13433 : : "port must be present for default SQ miss flow rules to exist.",
13434 : : port_id);
13435 : 0 : return ret;
13436 : : }
13437 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
13438 : 0 : proxy_priv = proxy_dev->data->dev_private;
13439 [ # # ]: 0 : if (!proxy_priv->dr_ctx)
13440 : : return 0;
13441 [ # # ]: 0 : if (!proxy_priv->hw_ctrl_fdb ||
13442 [ # # ]: 0 : !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
13443 [ # # ]: 0 : !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl)
13444 : : return 0;
13445 : 0 : cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
13446 [ # # ]: 0 : while (cf != NULL) {
13447 [ # # ]: 0 : cf_next = LIST_NEXT(cf, next);
13448 : : if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) {
13449 : 0 : claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
13450 [ # # ]: 0 : LIST_REMOVE(cf, next);
13451 : 0 : mlx5_free(cf);
13452 : : }
13453 : : cf = cf_next;
13454 : : }
13455 : : return 0;
13456 : : }
13457 : :
13458 : : int
13459 : 0 : mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
13460 : : {
13461 : 0 : uint16_t port_id = dev->data->port_id;
13462 : 0 : struct rte_flow_item_ethdev port_spec = {
13463 : : .port_id = port_id,
13464 : : };
13465 : 0 : struct rte_flow_item items[] = {
13466 : : {
13467 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
13468 : : .spec = &port_spec,
13469 : : },
13470 : : {
13471 : : .type = RTE_FLOW_ITEM_TYPE_END,
13472 : : },
13473 : : };
13474 : 0 : struct rte_flow_action_jump jump = {
13475 : : .group = 1,
13476 : : };
13477 : 0 : struct rte_flow_action actions[] = {
13478 : : {
13479 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
13480 : : .conf = &jump,
13481 : : },
13482 : : {
13483 : : .type = RTE_FLOW_ACTION_TYPE_END,
13484 : : }
13485 : : };
13486 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13487 : : .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP,
13488 : : };
13489 : : struct rte_eth_dev *proxy_dev;
13490 : : struct mlx5_priv *proxy_priv;
13491 : 0 : uint16_t proxy_port_id = dev->data->port_id;
13492 : : int ret;
13493 : :
13494 : 0 : ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
13495 [ # # ]: 0 : if (ret) {
13496 : 0 : DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
13497 : : "port must be present to create default FDB jump rule.",
13498 : : port_id);
13499 : 0 : return ret;
13500 : : }
13501 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
13502 : 0 : proxy_priv = proxy_dev->data->dev_private;
13503 [ # # ]: 0 : if (!proxy_priv->dr_ctx) {
13504 : 0 : DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
13505 : : "for HWS to create default FDB jump rule. Default rule will "
13506 : : "not be created.",
13507 : : proxy_port_id, port_id);
13508 : 0 : return 0;
13509 : : }
13510 [ # # # # ]: 0 : if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) {
13511 : 0 : DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
13512 : : "default flow tables were not created.",
13513 : : proxy_port_id, port_id);
13514 : 0 : rte_errno = EINVAL;
13515 : 0 : return -rte_errno;
13516 : : }
13517 : 0 : return flow_hw_create_ctrl_flow(dev, proxy_dev,
13518 : : proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl,
13519 : : items, 0, actions, 0, &flow_info, false);
13520 : : }
13521 : :
13522 : : int
13523 : 0 : mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
13524 : : {
13525 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13526 : 0 : struct rte_flow_item_eth promisc = {
13527 : : .hdr.dst_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
13528 : : .hdr.src_addr.addr_bytes = "\x00\x00\x00\x00\x00\x00",
13529 : : .hdr.ether_type = 0,
13530 : : };
13531 : 0 : struct rte_flow_item eth_all[] = {
13532 : : [0] = {
13533 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
13534 : : .spec = &promisc,
13535 : : .mask = &promisc,
13536 : : },
13537 : : [1] = {
13538 : : .type = RTE_FLOW_ITEM_TYPE_END,
13539 : : },
13540 : : };
13541 : 0 : struct rte_flow_action_modify_field mreg_action = {
13542 : : .operation = RTE_FLOW_MODIFY_SET,
13543 : : .dst = {
13544 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
13545 : : .tag_index = REG_C_1,
13546 : : },
13547 : : .src = {
13548 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
13549 : : .tag_index = REG_A,
13550 : : },
13551 : : .width = 32,
13552 : : };
13553 : 0 : struct rte_flow_action copy_reg_action[] = {
13554 : : [0] = {
13555 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
13556 : : .conf = &mreg_action,
13557 : : },
13558 : : [1] = {
13559 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
13560 : : },
13561 : : [2] = {
13562 : : .type = RTE_FLOW_ACTION_TYPE_END,
13563 : : },
13564 : : };
13565 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13566 : : .type = MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY,
13567 : : };
13568 : :
13569 : : MLX5_ASSERT(priv->master);
13570 [ # # ]: 0 : if (!priv->dr_ctx ||
13571 [ # # ]: 0 : !priv->hw_ctrl_fdb ||
13572 [ # # ]: 0 : !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
13573 : : return 0;
13574 : 0 : return flow_hw_create_ctrl_flow(dev, dev,
13575 : : priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl,
13576 : : eth_all, 0, copy_reg_action, 0, &flow_info, false);
13577 : : }
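The rule above copies the Tx metadata register (REG_A) into REG_C_1 so the value survives into the FDB domain. A hedged public-API analogue of such a copy, using generic field names rather than the internal register enums (tag index 1 is an arbitrary example, not the driver's mapping):

#include <rte_flow.h>

/* SET with a 32-bit width copies src into dst in one MODIFY_FIELD action. */
static const struct rte_flow_action_modify_field meta_to_tag = {
	.operation = RTE_FLOW_MODIFY_SET,
	.dst = { .field = RTE_FLOW_FIELD_TAG, .tag_index = 1 },
	.src = { .field = RTE_FLOW_FIELD_META },
	.width = 32,
};

static const struct rte_flow_action copy_meta_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, .conf = &meta_to_tag },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};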
13578 : :
13579 : : int
13580 : 0 : mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
13581 : : {
13582 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13583 : 0 : struct mlx5_rte_flow_item_sq sq_spec = {
13584 : : .queue = sqn,
13585 : : };
13586 : 0 : struct rte_flow_item items[] = {
13587 : : {
13588 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
13589 : : .spec = &sq_spec,
13590 : : },
13591 : : {
13592 : : .type = RTE_FLOW_ITEM_TYPE_END,
13593 : : },
13594 : : };
13595 : : /*
13596 : : * Allocate actions array suitable for all cases - extended metadata enabled or not.
13597 : : * With extended metadata there will be an additional MODIFY_FIELD action before JUMP.
13598 : : */
13599 : 0 : struct rte_flow_action actions[] = {
13600 : : { .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD },
13601 : : { .type = RTE_FLOW_ACTION_TYPE_JUMP },
13602 : : { .type = RTE_FLOW_ACTION_TYPE_END },
13603 : : { .type = RTE_FLOW_ACTION_TYPE_END },
13604 : : };
13605 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13606 : : .type = MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
13607 : : .tx_repr_sq = sqn,
13608 : : };
13609 : :
13610 : : /* It is assumed that the caller checked that representor matching is enabled. */
13611 : : MLX5_ASSERT(priv->sh->config.repr_matching);
13612 [ # # ]: 0 : if (!priv->dr_ctx) {
13613 : 0 : DRV_LOG(DEBUG, "Port %u must be configured for HWS, before creating "
13614 : : "default egress flow rules. Omitting creation.",
13615 : : dev->data->port_id);
13616 : 0 : return 0;
13617 : : }
13618 [ # # ]: 0 : if (!priv->hw_tx_repr_tagging_tbl) {
13619 : 0 : DRV_LOG(ERR, "Port %u is configured for HWS, but table for default "
13620 : : "egress flow rules does not exist.",
13621 : : dev->data->port_id);
13622 : 0 : rte_errno = EINVAL;
13623 : 0 : return -rte_errno;
13624 : : }
13625 : : /*
13626 : : * If extended metadata mode is enabled, then an additional MODIFY_FIELD action must be
13627 : : * placed before the terminating JUMP action.
13628 : : */
13629 [ # # ]: 0 : if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
13630 : 0 : actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
13631 : 0 : actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
13632 : : }
13633 : 0 : return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
13634 : : items, 0, actions, 0, &flow_info, external);
13635 : : }
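The actions array above is sized for the longest variant (hence the two trailing END entries) so that the extended-metadata case can be enabled by overwriting the tail in place, without a second array. The trick in isolation, as a sketch:

#include <stdbool.h>
#include <rte_flow.h>

static void
build_actions(struct rte_flow_action actions[4], bool extended)
{
	actions[0] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD };
	actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_JUMP };
	actions[2] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
	actions[3] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
	if (extended) {
		/* Shift JUMP one slot right to insert a second MODIFY_FIELD. */
		actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
	}
}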
13636 : :
13637 : : int
13638 : 0 : mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
13639 : : {
13640 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13641 : 0 : struct rte_flow_item_eth lacp_item = {
13642 : : .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
13643 : : };
13644 : 0 : struct rte_flow_item eth_lacp[] = {
13645 : : [0] = {
13646 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
13647 : : .spec = &lacp_item,
13648 : : .mask = &lacp_item,
13649 : : },
13650 : : [1] = {
13651 : : .type = RTE_FLOW_ITEM_TYPE_END,
13652 : : },
13653 : : };
13654 : 0 : struct rte_flow_action miss_action[] = {
13655 : : [0] = {
13656 : : .type = (enum rte_flow_action_type)
13657 : : MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
13658 : : },
13659 : : [1] = {
13660 : : .type = RTE_FLOW_ACTION_TYPE_END,
13661 : : },
13662 : : };
13663 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13664 : : .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
13665 : : };
13666 : :
13667 [ # # # # : 0 : if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl)
# # ]
13668 : : return 0;
13669 : 0 : return flow_hw_create_ctrl_flow(dev, dev,
13670 : : priv->hw_ctrl_fdb->hw_lacp_rx_tbl,
13671 : : eth_lacp, 0, miss_action, 0, &flow_info, false);
13672 : : }
13673 : :
13674 : : static uint32_t
13675 : : __calc_pattern_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
13676 : : {
13677 : : switch (eth_pattern_type) {
13678 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
13679 : : return MLX5_CTRL_PROMISCUOUS;
13680 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
13681 : : return MLX5_CTRL_ALL_MULTICAST;
13682 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
13683 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
13684 : : return MLX5_CTRL_BROADCAST;
13685 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
13686 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
13687 : : return MLX5_CTRL_IPV4_MULTICAST;
13688 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
13689 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
13690 : : return MLX5_CTRL_IPV6_MULTICAST;
13691 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
13692 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
13693 : : return MLX5_CTRL_DMAC;
13694 : : default:
13695 : : /* Should not reach here. */
13696 : : MLX5_ASSERT(false);
13697 : : return 0;
13698 : : }
13699 : : }
13700 : :
13701 : : static uint32_t
13702 : : __calc_vlan_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
13703 : : {
13704 [ # # ]: 0 : switch (eth_pattern_type) {
13705 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
13706 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
13707 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
13708 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
13709 : : return MLX5_CTRL_VLAN_FILTER;
13710 : 0 : default:
13711 : 0 : return 0;
13712 : : }
13713 : : }
13714 : :
13715 : : static bool
13716 [ # # ]: 0 : eth_pattern_type_is_requested(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
13717 : : uint32_t flags)
13718 : : {
13719 : : uint32_t pattern_flags = __calc_pattern_flags(eth_pattern_type);
13720 : : uint32_t vlan_flags = __calc_vlan_flags(eth_pattern_type);
13721 : 0 : bool pattern_requested = !!(pattern_flags & flags);
13722 [ # # # # ]: 0 : bool consider_vlan = vlan_flags || (MLX5_CTRL_VLAN_FILTER & flags);
13723 : 0 : bool vlan_requested = !!(vlan_flags & flags);
13724 : :
13725 [ # # ]: 0 : if (consider_vlan)
13726 : 0 : return pattern_requested && vlan_requested;
13727 : : else
13728 : : return pattern_requested;
13729 : : }
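In other words: a VLAN-qualified pattern is created only when both its base pattern bit and the VLAN-filter bit are requested, while a VLAN-less pattern needs only its own bit. Restated with hypothetical flag values for the broadcast case:

#include <stdbool.h>
#include <stdint.h>

#define CTRL_BROADCAST   (1u << 0)	/* hypothetical flag values */
#define CTRL_VLAN_FILTER (1u << 1)

static bool
bcast_vlan_requested(uint32_t flags)
{
	/* Mirrors eth_pattern_type_is_requested() for a BCAST_VLAN pattern. */
	return (flags & CTRL_BROADCAST) && (flags & CTRL_VLAN_FILTER);
}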
13730 : :
13731 : : static bool
13732 : : rss_type_is_requested(struct mlx5_priv *priv,
13733 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
13734 : : {
13735 : 0 : struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[rss_type];
13736 : : unsigned int i;
13737 : :
13738 [ # # ]: 0 : for (i = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
13739 [ # # ]: 0 : if (at->actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
13740 : 0 : const struct rte_flow_action_rss *rss = at->actions[i].conf;
13741 : 0 : uint64_t rss_types = rss->types;
13742 : :
13743 [ # # ]: 0 : if ((rss_types & priv->rss_conf.rss_hf) != rss_types)
13744 : : return false;
13745 : : }
13746 : : }
13747 : : return true;
13748 : : }
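The check above is a plain subset test: every hash type requested by the template's RSS action must also be enabled in the port's configured rss_hf mask. The test in isolation:

#include <stdbool.h>
#include <stdint.h>

static bool
is_subset(uint64_t requested, uint64_t configured)
{
	return (requested & configured) == requested;
}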
13749 : :
13750 : : static const struct rte_flow_item_eth *
13751 : : __get_eth_spec(const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern)
13752 : : {
13753 : 0 : switch (pattern) {
13754 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
13755 : : return &ctrl_rx_eth_promisc_spec;
13756 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
13757 : 0 : return &ctrl_rx_eth_mcast_spec;
13758 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
13759 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
13760 : 0 : return &ctrl_rx_eth_bcast_spec;
13761 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
13762 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
13763 : 0 : return &ctrl_rx_eth_ipv4_mcast_spec;
13764 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
13765 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
13766 : 0 : return &ctrl_rx_eth_ipv6_mcast_spec;
13767 : 0 : default:
13768 : : /* This case should not be reached. */
13769 : : MLX5_ASSERT(false);
13770 : 0 : return NULL;
13771 : : }
13772 : : }
13773 : :
13774 : : static int
13775 [ # # # # : 0 : __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
# # ]
13776 : : struct rte_flow_template_table *tbl,
13777 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
13778 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
13779 : : {
13780 : : const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
13781 : : struct rte_flow_item items[5];
13782 : 0 : struct rte_flow_action actions[] = {
13783 : : { .type = RTE_FLOW_ACTION_TYPE_RSS },
13784 : : { .type = RTE_FLOW_ACTION_TYPE_END },
13785 : : };
13786 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13787 : : .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
13788 : : };
13789 : :
13790 [ # # ]: 0 : if (!eth_spec)
13791 : : return -EINVAL;
13792 : : memset(items, 0, sizeof(items));
13793 : 0 : items[0] = (struct rte_flow_item){
13794 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
13795 : : .spec = eth_spec,
13796 : : };
13797 [ # # # ]: 0 : items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
13798 [ # # # ]: 0 : items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
13799 : 0 : items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
13800 : 0 : items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
13801 : : /* Without VLAN filtering, only a single flow rule needs to be created. */
13802 : 0 : return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
13803 : : }
13804 : :
13805 : : static int
13806 : 0 : __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
13807 : : struct rte_flow_template_table *tbl,
13808 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
13809 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
13810 : : {
13811 [ # # # # : 0 : struct mlx5_priv *priv = dev->data->dev_private;
# # ]
13812 : : const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
13813 : : struct rte_flow_item items[5];
13814 : 0 : struct rte_flow_action actions[] = {
13815 : : { .type = RTE_FLOW_ACTION_TYPE_RSS },
13816 : : { .type = RTE_FLOW_ACTION_TYPE_END },
13817 : : };
13818 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13819 : : .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
13820 : : };
13821 : : unsigned int i;
13822 : :
13823 [ # # ]: 0 : if (!eth_spec)
13824 : : return -EINVAL;
13825 : : memset(items, 0, sizeof(items));
13826 : 0 : items[0] = (struct rte_flow_item){
13827 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
13828 : : .spec = eth_spec,
13829 : : };
13830 : : /* The optional VLAN item is added now; its spec is filled per VLAN ID below. */
13831 [ # # # ]: 0 : items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
13832 [ # # # ]: 0 : items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
13833 : 0 : items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
13834 : 0 : items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
13835 : : /* With VLAN filtering enabled, create one flow rule for each registered VLAN ID. */
13836 [ # # ]: 0 : for (i = 0; i < priv->vlan_filter_n; ++i) {
13837 : 0 : uint16_t vlan = priv->vlan_filter[i];
13838 : 0 : struct rte_flow_item_vlan vlan_spec = {
13839 [ # # ]: 0 : .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
13840 : : };
13841 : :
13842 : 0 : items[1].spec = &vlan_spec;
13843 [ # # ]: 0 : if (flow_hw_create_ctrl_flow(dev, dev,
13844 : : tbl, items, 0, actions, 0, &flow_info, false))
13845 : 0 : return -rte_errno;
13846 : : }
13847 : : return 0;
13848 : : }
13849 : :
13850 : : static int
13851 : 0 : __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
13852 : : struct rte_flow_template_table *tbl,
13853 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
13854 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
13855 : : {
13856 : : struct rte_flow_item_eth eth_spec;
13857 : : struct rte_flow_item items[5];
13858 : 0 : struct rte_flow_action actions[] = {
13859 : : { .type = RTE_FLOW_ACTION_TYPE_RSS },
13860 : : { .type = RTE_FLOW_ACTION_TYPE_END },
13861 : : };
13862 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13863 : : .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
13864 : : };
13865 [ # # # ]: 0 : const struct rte_ether_addr cmp = {
13866 : : .addr_bytes = "\x00\x00\x00\x00\x00\x00",
13867 : : };
13868 : : unsigned int i;
13869 : :
13870 : : RTE_SET_USED(pattern_type);
13871 : :
13872 : : memset(&eth_spec, 0, sizeof(eth_spec));
13873 : : memset(items, 0, sizeof(items));
13874 : 0 : items[0] = (struct rte_flow_item){
13875 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
13876 : : .spec = &eth_spec,
13877 : : };
13878 [ # # # ]: 0 : items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
13879 [ # # # ]: 0 : items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
13880 : 0 : items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
13881 : 0 : items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
13882 [ # # ]: 0 : for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
13883 : 0 : struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
13884 : :
13885 [ # # ]: 0 : if (!memcmp(mac, &cmp, sizeof(*mac)))
13886 : 0 : continue;
13887 : : memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
13888 [ # # ]: 0 : if (flow_hw_create_ctrl_flow(dev, dev,
13889 : : tbl, items, 0, actions, 0, &flow_info, false))
13890 : 0 : return -rte_errno;
13891 : : }
13892 : : return 0;
13893 : : }
13894 : :
13895 : : static int
13896 : 0 : __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
13897 : : struct rte_flow_template_table *tbl,
13898 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
13899 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
13900 : : {
13901 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13902 : : struct rte_flow_item_eth eth_spec;
13903 : : struct rte_flow_item items[5];
13904 : 0 : struct rte_flow_action actions[] = {
13905 : : { .type = RTE_FLOW_ACTION_TYPE_RSS },
13906 : : { .type = RTE_FLOW_ACTION_TYPE_END },
13907 : : };
13908 : 0 : struct mlx5_hw_ctrl_flow_info flow_info = {
13909 : : .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
13910 : : };
13911 [ # # # ]: 0 : const struct rte_ether_addr cmp = {
13912 : : .addr_bytes = "\x00\x00\x00\x00\x00\x00",
13913 : : };
13914 : : unsigned int i;
13915 : : unsigned int j;
13916 : :
13917 : : RTE_SET_USED(pattern_type);
13918 : :
13919 : : memset(&eth_spec, 0, sizeof(eth_spec));
13920 : : memset(items, 0, sizeof(items));
13921 : 0 : items[0] = (struct rte_flow_item){
13922 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
13923 : : .spec = &eth_spec,
13924 : : };
13925 [ # # # ]: 0 : items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
13926 [ # # # ]: 0 : items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
13927 : 0 : items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
13928 : 0 : items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
13929 [ # # ]: 0 : for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
13930 : 0 : struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
13931 : :
13932 [ # # ]: 0 : if (!memcmp(mac, &cmp, sizeof(*mac)))
13933 : 0 : continue;
13934 : : memcpy(&eth_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
13935 [ # # ]: 0 : for (j = 0; j < priv->vlan_filter_n; ++j) {
13936 : 0 : uint16_t vlan = priv->vlan_filter[j];
13937 : 0 : struct rte_flow_item_vlan vlan_spec = {
13938 [ # # ]: 0 : .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
13939 : : };
13940 : :
13941 : 0 : items[1].spec = &vlan_spec;
13942 [ # # ]: 0 : if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
13943 : : &flow_info, false))
13944 : 0 : return -rte_errno;
13945 : : }
13946 : : }
13947 : : return 0;
13948 : : }
13949 : :
13950 : : static int
13951 : 0 : __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
13952 : : struct rte_flow_template_table *tbl,
13953 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
13954 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
13955 : : {
13956 [ # # # # : 0 : switch (pattern_type) {
# ]
13957 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
13958 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
13959 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
13960 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
13961 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
13962 : 0 : return __flow_hw_ctrl_flows_single(dev, tbl, pattern_type, rss_type);
13963 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
13964 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
13965 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
13966 : 0 : return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
13967 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
13968 : 0 : return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
13969 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
13970 : 0 : return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
13971 : 0 : default:
13972 : : /* Should not reach here. */
13973 : : MLX5_ASSERT(false);
13974 : 0 : rte_errno = EINVAL;
13975 : 0 : return -EINVAL;
13976 : : }
13977 : : }
13978 : :
13979 : :
13980 : : int
13981 : 0 : mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
13982 : : {
13983 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13984 : : struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
13985 : : unsigned int i;
13986 : : unsigned int j;
13987 : : int ret = 0;
13988 : :
13989 : : RTE_SET_USED(priv);
13990 : : RTE_SET_USED(flags);
13991 [ # # ]: 0 : if (!priv->dr_ctx) {
13992 : 0 : DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
13993 : : "HWS needs to be configured beforehand.",
13994 : : dev->data->port_id);
13995 : 0 : return 0;
13996 : : }
13997 [ # # ]: 0 : if (!priv->hw_ctrl_rx) {
13998 : 0 : DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
13999 : : dev->data->port_id);
14000 : 0 : rte_errno = EINVAL;
14001 : 0 : return -rte_errno;
14002 : : }
14003 : : hw_ctrl_rx = priv->hw_ctrl_rx;
14004 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
14005 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
14006 : :
14007 [ # # ]: 0 : if (!eth_pattern_type_is_requested(eth_pattern_type, flags))
14008 : 0 : continue;
14009 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
14010 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
14011 : : struct rte_flow_actions_template *at;
14012 : : struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
14013 : 0 : const struct mlx5_flow_template_table_cfg cfg = {
14014 : : .attr = tmpls->attr,
14015 : : .external = 0,
14016 : : };
14017 : :
14018 [ # # ]: 0 : if (!hw_ctrl_rx->rss[rss_type]) {
14019 : 0 : at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
14020 [ # # ]: 0 : if (!at)
14021 : 0 : return -rte_errno;
14022 : 0 : hw_ctrl_rx->rss[rss_type] = at;
14023 : : } else {
14024 : 0 : at = hw_ctrl_rx->rss[rss_type];
14025 : : }
14026 [ # # ]: 0 : if (!rss_type_is_requested(priv, rss_type))
14027 : 0 : continue;
14028 [ # # ]: 0 : if (!tmpls->tbl) {
14029 : 0 : tmpls->tbl = flow_hw_table_create(dev, &cfg,
14030 : : &tmpls->pt, 1, &at, 1, NULL);
14031 [ # # ]: 0 : if (!tmpls->tbl) {
14032 : 0 : DRV_LOG(ERR, "port %u Failed to create template table "
14033 : : "for control flow rules. Unable to create "
14034 : : "control flow rules.",
14035 : : dev->data->port_id);
14036 : 0 : return -rte_errno;
14037 : : }
14038 : : }
14039 : :
14040 : 0 : ret = __flow_hw_ctrl_flows(dev, tmpls->tbl, eth_pattern_type, rss_type);
14041 [ # # ]: 0 : if (ret) {
14042 : 0 : DRV_LOG(ERR, "port %u Failed to create control flow rule.",
14043 : : dev->data->port_id);
14044 : 0 : return ret;
14045 : : }
14046 : : }
14047 : : }
14048 : : return 0;
14049 : : }
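The nested loop above creates RSS action templates and template tables lazily, using the cached pointer itself as the "already created" marker. That caching pattern in isolation (hypothetical types, single-threaded as in the control path above):

#include <stddef.h>

struct lazy_cache { void *obj; };

static void *
get_or_create(struct lazy_cache *c, void *(*ctor)(void))
{
	if (c->obj == NULL)
		c->obj = ctor();	/* on failure stays NULL; caller checks */
	return c->obj;
}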
14050 : :
14051 : : static __rte_always_inline uint32_t
14052 : : mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
14053 : : {
14054 : : uint32_t tbl_type;
14055 : :
14056 : 0 : if (domain->transfer)
14057 : : tbl_type = MLX5DR_ACTION_FLAG_HWS_FDB;
14058 [ # # ]: 0 : else if (domain->egress)
14059 : : tbl_type = MLX5DR_ACTION_FLAG_HWS_TX;
14060 [ # # ]: 0 : else if (domain->ingress)
14061 : : tbl_type = MLX5DR_ACTION_FLAG_HWS_RX;
14062 : : else
14063 : : tbl_type = UINT32_MAX;
14064 : : return tbl_type;
14065 : : }
14066 : :
14067 : : static struct mlx5_hw_encap_decap_action *
14068 : 0 : __mlx5_reformat_create(struct rte_eth_dev *dev,
14069 : : const struct rte_flow_action_raw_encap *encap_conf,
14070 : : const struct rte_flow_indir_action_conf *domain,
14071 : : enum mlx5dr_action_type type)
14072 : : {
14073 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
14074 : : struct mlx5_hw_encap_decap_action *handle;
14075 : : struct mlx5dr_action_reformat_header hdr;
14076 : : uint32_t flags;
14077 : :
14078 : : flags = mlx5_reformat_domain_to_tbl_type(domain);
14079 : 0 : flags |= (uint32_t)MLX5DR_ACTION_FLAG_SHARED;
14080 [ # # ]: 0 : if (flags == UINT32_MAX) {
14081 : 0 : DRV_LOG(ERR, "Reformat: invalid indirect action configuration");
14082 : 0 : return NULL;
14083 : : }
14084 : : /* Allocate new list entry. */
14085 : 0 : handle = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*handle), 0, SOCKET_ID_ANY);
14086 [ # # ]: 0 : if (!handle) {
14087 : 0 : DRV_LOG(ERR, "Reformat: failed to allocate reformat entry");
14088 : 0 : return NULL;
14089 : : }
14090 : 0 : handle->action_type = type;
14091 [ # # ]: 0 : hdr.sz = encap_conf ? encap_conf->size : 0;
14092 [ # # ]: 0 : hdr.data = encap_conf ? encap_conf->data : NULL;
14093 : 0 : handle->action = mlx5dr_action_create_reformat(priv->dr_ctx,
14094 : : type, 1, &hdr, 0, flags);
14095 [ # # ]: 0 : if (!handle->action) {
14096 : 0 : DRV_LOG(ERR, "Reformat: failed to create reformat action");
14097 : 0 : mlx5_free(handle);
14098 : 0 : return NULL;
14099 : : }
14100 : : return handle;
14101 : : }
14102 : :
14103 : : /**
14104 : : * Create mlx5 reformat action.
14105 : : *
14106 : : * @param[in] dev
14107 : : * Pointer to rte_eth_dev structure.
14108 : : * @param[in] conf
14109 : : * Pointer to the indirect action parameters.
14110 : : * @param[in] encap_action
14111 : : * Pointer to the raw_encap action configuration.
14112 : : * @param[in] decap_action
14113 : : * Pointer to the raw_decap action configuration.
14114 : : * @param[out] error
14115 : : * Pointer to error structure.
14116 : : *
14117 : : * @return
14118 : : * A valid shared action handle in case of success, NULL otherwise and
14119 : : * rte_errno is set.
14120 : : */
14121 : : struct mlx5_hw_encap_decap_action*
14122 : 0 : mlx5_reformat_action_create(struct rte_eth_dev *dev,
14123 : : const struct rte_flow_indir_action_conf *conf,
14124 : : const struct rte_flow_action *encap_action,
14125 : : const struct rte_flow_action *decap_action,
14126 : : struct rte_flow_error *error)
14127 : : {
14128 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14129 : : struct mlx5_hw_encap_decap_action *handle;
14130 : : const struct rte_flow_action_raw_encap *encap = NULL;
14131 : : const struct rte_flow_action_raw_decap *decap = NULL;
14132 : : enum mlx5dr_action_type type = MLX5DR_ACTION_TYP_LAST;
14133 : :
14134 : : MLX5_ASSERT(!encap_action || encap_action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP);
14135 : : MLX5_ASSERT(!decap_action || decap_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP);
14136 [ # # ]: 0 : if (priv->sh->config.dv_flow_en != 2) {
14137 : 0 : rte_flow_error_set(error, ENOTSUP,
14138 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
14139 : : "Reformat: hardware does not support");
14140 : 0 : return NULL;
14141 : : }
14142 [ # # # # ]: 0 : if (!conf || (conf->transfer + conf->egress + conf->ingress != 1)) {
14143 : 0 : rte_flow_error_set(error, EINVAL,
14144 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
14145 : : "Reformat: domain should be specified");
14146 : 0 : return NULL;
14147 : : }
14148 [ # # # # : 0 : if ((encap_action && !encap_action->conf) || (decap_action && !decap_action->conf)) {
# # # # ]
14149 : 0 : rte_flow_error_set(error, EINVAL,
14150 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
14151 : : "Reformat: missed action configuration");
14152 : 0 : return NULL;
14153 : : }
14154 [ # # ]: 0 : if (encap_action && !decap_action) {
14155 : 0 : encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
14156 [ # # ]: 0 : if (!encap->size || encap->size > MLX5_ENCAP_MAX_LEN ||
14157 : : encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
14158 : 0 : rte_flow_error_set(error, EINVAL,
14159 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
14160 : : "Reformat: Invalid encap length");
14161 : 0 : return NULL;
14162 : : }
14163 : : type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
14164 [ # # ]: 0 : } else if (decap_action && !encap_action) {
14165 : 0 : decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
14166 [ # # ]: 0 : if (!decap->size || decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
14167 : 0 : rte_flow_error_set(error, EINVAL,
14168 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
14169 : : "Reformat: Invalid decap length");
14170 : 0 : return NULL;
14171 : : }
14172 : : type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
14173 [ # # ]: 0 : } else if (encap_action && decap_action) {
14174 : 0 : decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
14175 : 0 : encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
14176 [ # # ]: 0 : if (decap->size < MLX5_ENCAPSULATION_DECISION_SIZE &&
14177 [ # # # # ]: 0 : encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
14178 : : encap->size <= MLX5_ENCAP_MAX_LEN) {
14179 : : type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
14180 [ # # ]: 0 : } else if (decap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
14181 [ # # ]: 0 : encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
14182 : : type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
14183 : : } else {
14184 : 0 : rte_flow_error_set(error, EINVAL,
14185 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
14186 : : "Reformat: Invalid decap & encap length");
14187 : 0 : return NULL;
14188 : : }
14189 [ # # ]: 0 : } else if (!encap_action && !decap_action) {
14190 : 0 : rte_flow_error_set(error, EINVAL,
14191 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
14192 : : "Reformat: Invalid decap & encap configurations");
14193 : 0 : return NULL;
14194 : : }
14195 [ # # ]: 0 : if (!priv->dr_ctx) {
14196 : 0 : rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
14197 : : encap_action, "Reformat: HWS not supported");
14198 : 0 : return NULL;
14199 : : }
14200 : 0 : handle = __mlx5_reformat_create(dev, encap, conf, type);
14201 [ # # ]: 0 : if (!handle) {
14202 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
14203 : : "Reformat: failed to create indirect action");
14204 : 0 : return NULL;
14205 : : }
14206 : : return handle;
14207 : : }
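On the application side, a combined reformat like this is requested through the generic indirect action list API. A hedged sketch (header buffer and sizes are placeholders) of an L3 decap-plus-encap request; per the size rules checked above, a decap shorter than the encapsulation decision size combined with a full tunnel header selects MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:

#include <stddef.h>
#include <rte_flow.h>

static struct rte_flow_action_list_handle *
create_reformat_handle(uint16_t port_id, uint8_t *tnl_hdr, size_t tnl_sz)
{
	const struct rte_flow_indir_action_conf conf = { .transfer = 1 };
	struct rte_flow_action_raw_decap decap = { .size = 14 }; /* strip L2 only */
	struct rte_flow_action_raw_encap encap = {
		.data = tnl_hdr,	/* full tunnel header: L2 + L3 + ... */
		.size = tnl_sz,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_action_list_handle_create(port_id, &conf, actions, &err);
}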
14208 : :
14209 : : /**
14210 : : * Destroy the indirect reformat action.
14211 : : * Release action-related resources on the NIC and free the memory.
14212 : : * Lock free (the mutex should be acquired by the caller).
14213 : : *
14214 : : * @param[in] dev
14215 : : * Pointer to the Ethernet device structure.
14216 : : * @param[in] handle
14217 : : * The indirect action list handle to be removed.
14218 : : * @param[out] error
14219 : : * Perform verbose error reporting if not NULL. Initialized in case of
14220 : : * error only.
14221 : : *
14222 : : * @return
14223 : : * 0 on success, otherwise negative errno value.
14224 : : */
14225 : : int
14226 : 0 : mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
14227 : : struct rte_flow_action_list_handle *handle,
14228 : : struct rte_flow_error *error)
14229 : : {
14230 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14231 : : struct mlx5_hw_encap_decap_action *action;
14232 : :
14233 : : action = (struct mlx5_hw_encap_decap_action *)handle;
14234 [ # # # # ]: 0 : if (!priv->dr_ctx || !action)
14235 : 0 : return rte_flow_error_set(error, ENOTSUP,
14236 : : RTE_FLOW_ERROR_TYPE_ACTION, handle,
14237 : : "Reformat: invalid action handle");
14238 : 0 : mlx5dr_action_destroy(action->action);
14239 : 0 : mlx5_free(handle);
14240 : 0 : return 0;
14241 : : }
14242 : :
14243 : : static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops = {
14244 : : .async_create = flow_hw_async_flow_create,
14245 : : .async_create_by_index = flow_hw_async_flow_create_by_index,
14246 : : .async_actions_update = flow_hw_async_flow_update,
14247 : : .async_destroy = flow_hw_async_flow_destroy,
14248 : : .push = flow_hw_push,
14249 : : .pull = flow_hw_pull,
14250 : : .async_action_handle_create = flow_hw_action_handle_create,
14251 : : .async_action_handle_destroy = flow_hw_action_handle_destroy,
14252 : : .async_action_handle_update = flow_hw_action_handle_update,
14253 : : .async_action_handle_query = flow_hw_action_handle_query,
14254 : : .async_action_handle_query_update = flow_hw_async_action_handle_query_update,
14255 : : .async_action_list_handle_create = flow_hw_async_action_list_handle_create,
14256 : : .async_action_list_handle_destroy = flow_hw_async_action_list_handle_destroy,
14257 : : .async_action_list_handle_query_update =
14258 : : flow_hw_async_action_list_handle_query_update,
14259 : : };
14260 : :
14261 : : #endif
|