Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3 : : */
4 : :
5 : : #include <eal_export.h>
6 : : #include <rte_flow.h>
7 : : #include <rte_flow_driver.h>
8 : : #include <rte_stdatomic.h>
9 : :
10 : : #include <mlx5_malloc.h>
11 : :
12 : : #include "mlx5.h"
13 : : #include "mlx5_common.h"
14 : : #include "mlx5_defs.h"
15 : : #include "mlx5_flow.h"
16 : : #include "mlx5_flow_os.h"
17 : : #include "mlx5_rx.h"
18 : :
19 : : #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
20 : : #include "mlx5_hws_cnt.h"
21 : :
22 : : /** Fast path async flow API functions. */
23 : : static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops;
24 : :
25 : : /*
26 : : * The default ipool threshold value indicates which per_core_cache
27 : : * value to set.
28 : : */
29 : : #define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
30 : : /* The default min local cache size. */
31 : : #define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
32 : :
33 : : /* Default push burst threshold. */
34 : : #define BURST_THR 32u
35 : :
36 : : /* Default queue to flush the flows. */
37 : : #define MLX5_DEFAULT_FLUSH_QUEUE 0
38 : :
39 : : /* Maximum number of rules in control flow tables. */
40 : : #define MLX5_HW_CTRL_FLOW_NB_RULES (4096)
41 : :
42 : : /* Lowest flow group usable by an application if group translation is done. */
43 : : #define MLX5_HW_LOWEST_USABLE_GROUP (1)
44 : :
45 : : /* Maximum group index usable by user applications for transfer flows. */
46 : : #define MLX5_HW_MAX_TRANSFER_GROUP (UINT32_MAX - 1)
47 : :
48 : : /* Maximum group index usable by user applications for egress flows. */
49 : : #define MLX5_HW_MAX_EGRESS_GROUP (UINT32_MAX - 1)
50 : :
51 : : /* Lowest priority for HW root table. */
52 : : #define MLX5_HW_LOWEST_PRIO_ROOT 15
53 : :
54 : : /* Lowest priority for HW non-root table. */
55 : : #define MLX5_HW_LOWEST_PRIO_NON_ROOT (UINT32_MAX)
56 : :
57 : : /* Priorities for Rx control flow rules. */
58 : : #define MLX5_HW_CTRL_RX_PRIO_L2 (MLX5_HW_LOWEST_PRIO_ROOT)
59 : : #define MLX5_HW_CTRL_RX_PRIO_L3 (MLX5_HW_LOWEST_PRIO_ROOT - 1)
60 : : #define MLX5_HW_CTRL_RX_PRIO_L4 (MLX5_HW_LOWEST_PRIO_ROOT - 2)
61 : :
62 : : #define MLX5_HW_VLAN_PUSH_TYPE_IDX 0
63 : : #define MLX5_HW_VLAN_PUSH_VID_IDX 1
64 : : #define MLX5_HW_VLAN_PUSH_PCP_IDX 2
65 : :
66 : : struct mlx5_indlst_legacy {
67 : : struct mlx5_indirect_list indirect;
68 : : struct rte_flow_action_handle *handle;
69 : : enum rte_flow_action_type legacy_type;
70 : : };
71 : :
72 : : #define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
73 : : (((const struct encap_type *)(ptr))->definition)
74 : :
75 : : /**
76 : : * Returns the size of a struct with a following layout:
77 : : *
78 : : * @code{.c}
79 : : * struct rte_flow_hw {
80 : : * // rte_flow_hw fields
81 : : * uint8_t rule[mlx5dr_rule_get_handle_size()];
82 : : * };
83 : : * @endcode
84 : : *
85 : : * Such struct is used as a basic container for HW Steering flow rule.
86 : : */
87 : : static size_t
88 : : mlx5_flow_hw_entry_size(void)
89 : : {
90 : 0 : return sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
91 : : }
92 : :
93 : : /**
94 : : * Returns the size of "auxed" rte_flow_hw structure which is assumed to be laid out as follows:
95 : : *
96 : : * @code{.c}
97 : : * struct {
98 : : * struct rte_flow_hw {
99 : : * // rte_flow_hw fields
100 : : * uint8_t rule[mlx5dr_rule_get_handle_size()];
101 : : * } flow;
102 : : * struct rte_flow_hw_aux aux;
103 : : * };
104 : : * @endcode
105 : : *
106 : : * Such struct is used whenever rte_flow_hw_aux cannot be allocated separately from the rte_flow_hw
107 : : * e.g., when table is resizable.
108 : : */
109 : : static size_t
110 : : mlx5_flow_hw_auxed_entry_size(void)
111 : : {
112 : 0 : size_t rule_size = mlx5dr_rule_get_handle_size();
113 : :
114 : 0 : return sizeof(struct rte_flow_hw) + rule_size + sizeof(struct rte_flow_hw_aux);
115 : : }
116 : :
117 : : /**
118 : : * Returns a valid pointer to rte_flow_hw_aux associated with given rte_flow_hw
119 : : * depending on template table configuration.
120 : : */
121 : : static __rte_always_inline struct rte_flow_hw_aux *
122 : : mlx5_flow_hw_aux(uint16_t port_id, struct rte_flow_hw *flow)
123 : : {
124 : 0 : struct rte_flow_template_table *table = flow->table;
125 : :
126 : 0 : if (!flow->nt_rule) {
127 [ # # # # : 0 : if (rte_flow_template_table_resizable(port_id, &table->cfg.attr)) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # #
# ]
128 : 0 : size_t offset = sizeof(struct rte_flow_hw) + mlx5dr_rule_get_handle_size();
129 : :
130 : 0 : return RTE_PTR_ADD(flow, offset);
131 : : } else {
132 : 0 : return &table->flow_aux[flow->idx - 1];
133 : : }
134 : : } else {
135 : 0 : return flow->nt2hws->flow_aux;
136 : : }
137 : : }
138 : :
139 : : static __rte_always_inline void
140 : : mlx5_flow_hw_aux_set_age_idx(struct rte_flow_hw *flow,
141 : : struct rte_flow_hw_aux *aux,
142 : : uint32_t age_idx)
143 : : {
144 : : /*
145 : : * Only when creating a flow rule, the type will be set explicitly.
146 : : * Or else, it should be none in the rule update case.
147 : : */
148 [ # # # # : 0 : if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
# # # # #
# # # # #
# # # # #
# ]
149 : 0 : aux->upd.age_idx = age_idx;
150 : : else
151 : 0 : aux->orig.age_idx = age_idx;
152 : : }
153 : :
154 : : static __rte_always_inline uint32_t
155 : : mlx5_flow_hw_aux_get_age_idx(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
156 : : {
157 [ # # # # : 0 : if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
# # # # #
# # # ]
158 : 0 : return aux->upd.age_idx;
159 : : else
160 : 0 : return aux->orig.age_idx;
161 : : }
162 : :
163 : : static __rte_always_inline void
164 : : mlx5_flow_hw_aux_set_mtr_id(struct rte_flow_hw *flow,
165 : : struct rte_flow_hw_aux *aux,
166 : : uint32_t mtr_id)
167 : : {
168 [ # # # # : 0 : if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
# # # # #
# ]
169 : 0 : aux->upd.mtr_id = mtr_id;
170 : : else
171 : 0 : aux->orig.mtr_id = mtr_id;
172 : : }
173 : :
174 : : static __rte_always_inline uint32_t
175 : : mlx5_flow_hw_aux_get_mtr_id(struct rte_flow_hw *flow, struct rte_flow_hw_aux *aux)
176 : : {
177 [ # # ]: 0 : if (unlikely(flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE))
178 : 0 : return aux->upd.mtr_id;
179 : : else
180 : 0 : return aux->orig.mtr_id;
181 : : }
182 : :
183 : : static __rte_always_inline struct mlx5_hw_q_job *
184 : : flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
185 : : const struct rte_flow_action_handle *handle,
186 : : void *user_data, void *query_data,
187 : : enum mlx5_hw_job_type type,
188 : : enum mlx5_hw_indirect_type indirect_type,
189 : : struct rte_flow_error *error);
190 : : static void
191 : : flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow,
192 : : struct rte_flow_error *error);
193 : :
194 : : static int
195 : : mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
196 : : struct rte_flow_template_table *tbl,
197 : : struct mlx5_multi_pattern_segment *segment,
198 : : uint32_t bulk_size,
199 : : struct rte_flow_error *error);
200 : : static void
201 : : mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment);
202 : :
203 : : static __rte_always_inline enum mlx5_indirect_list_type
204 : : flow_hw_inlist_type_get(const struct rte_flow_action *actions);
205 : :
206 : : static int
207 : : flow_hw_allocate_actions(struct rte_eth_dev *dev,
208 : : uint64_t action_flags,
209 : : struct rte_flow_error *error);
210 : :
211 : : static int
212 : : flow_hw_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
213 : : const struct rte_flow_item items[],
214 : : const struct rte_flow_action actions[],
215 : : bool external __rte_unused, int hairpin __rte_unused,
216 : : struct rte_flow_error *error);
217 : :
218 : : bool
219 : 0 : mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error)
220 : : {
221 : 0 : const struct mlx5_priv *priv = dev->data->dev_private;
222 : :
223 [ # # ]: 0 : if (!priv->dr_ctx) {
224 : 0 : rte_flow_error_set(error, EINVAL,
225 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
226 : : "non-template flow engine was not configured");
227 : 0 : return false;
228 : : }
229 : : return true;
230 : : }
231 : :
232 : : static int
233 : : flow_hw_allocate_actions(struct rte_eth_dev *dev,
234 : : uint64_t action_flags,
235 : : struct rte_flow_error *error);
236 : :
237 : : static __rte_always_inline int
238 : : mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
239 : : {
240 : : switch (type) {
241 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
242 : : return 0;
243 : : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
244 : : return 1;
245 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
246 : : return 2;
247 : : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
248 : : return 3;
249 : : default:
250 : : break;
251 : : }
252 : : return -1;
253 : : }
254 : :
255 : : /* Include only supported reformat actions for BWC non template API. */
256 : : static __rte_always_inline int
257 : : mlx5_bwc_multi_pattern_reformat_to_index(enum mlx5dr_action_type type)
258 : : {
259 : 0 : switch (type) {
260 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
261 : : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
262 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
263 : : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
264 : : return mlx5_multi_pattern_reformat_to_index(type);
265 : : default:
266 : : break;
267 : : }
268 : : return -1;
269 : : }
270 : :
271 : : static __rte_always_inline enum mlx5dr_action_type
272 : : mlx5_multi_pattern_reformat_index_to_type(uint32_t ix)
273 : : {
274 : : switch (ix) {
275 : : case 0:
276 : : return MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
277 : : case 1:
278 : : return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
279 : : case 2:
280 : : return MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
281 : : case 3:
282 : : return MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
283 : : default:
284 : : break;
285 : : }
286 : : return MLX5DR_ACTION_TYP_MAX;
287 : : }
288 : :
/*
 * Resolve the concrete FDB table type for a transfer table.
 *
 * When unified FDB is enabled and the table is not in the root group, the
 * type may be narrowed by the application specialization flags:
 *  - neither WIRE nor VPORT origin set -> unified FDB table,
 *  - WIRE origin -> FDB Rx table,
 *  - VPORT origin -> FDB Tx table.
 * Both origin flags set at once is invalid (asserted below). Root group or
 * disabled unified FDB falls back to the plain FDB table type.
 */
static inline enum mlx5dr_table_type
get_mlx5dr_fdb_table_type(const struct rte_flow_attr *attr,
			  uint32_t specialize, bool fdb_unified_en)
{
	if (fdb_unified_en && !!attr->group) {
		if ((specialize & (RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
		    RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)) == 0)
			return MLX5DR_TABLE_TYPE_FDB_UNIFIED;
		/* WIRE and VPORT origin flags are mutually exclusive. */
		MLX5_ASSERT((specialize & (RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
					   RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)) !=
			    (RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
			     RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG));
		if (specialize & RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG)
			return MLX5DR_TABLE_TYPE_FDB_RX;
		if (specialize & RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG)
			return MLX5DR_TABLE_TYPE_FDB_TX;
	}

	return MLX5DR_TABLE_TYPE_FDB;
}
309 : :
310 : : static inline enum mlx5dr_table_type
311 : 0 : get_mlx5dr_table_type(const struct rte_flow_attr *attr, uint32_t specialize,
312 : : bool fdb_unified_en)
313 : : {
314 : : enum mlx5dr_table_type type;
315 : :
316 [ # # ]: 0 : if (attr->transfer)
317 : : type = get_mlx5dr_fdb_table_type(attr, specialize, fdb_unified_en);
318 [ # # ]: 0 : else if (attr->egress)
319 : : type = MLX5DR_TABLE_TYPE_NIC_TX;
320 : : else
321 : : type = MLX5DR_TABLE_TYPE_NIC_RX;
322 : 0 : return type;
323 : : }
324 : :
325 : : /* Non template default queue size used for inner ctrl queue. */
326 : : #define MLX5_NT_DEFAULT_QUEUE_SIZE 32
327 : :
328 : : static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
329 : : static int flow_hw_translate_group(struct rte_eth_dev *dev,
330 : : const struct mlx5_flow_template_table_cfg *cfg,
331 : : uint32_t group,
332 : : uint32_t *table_group,
333 : : struct rte_flow_error *error);
334 : : static __rte_always_inline int
335 : : flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
336 : : struct mlx5_modification_cmd *mhdr_cmd,
337 : : struct mlx5_action_construct_data *act_data,
338 : : const struct mlx5_hw_actions *hw_acts,
339 : : const struct rte_flow_action *action);
340 : : static void
341 : : flow_hw_construct_quota(struct mlx5_priv *priv,
342 : : struct mlx5dr_rule_action *rule_act, uint32_t qid);
343 : :
344 : : static int
345 : : mlx5_flow_ct_init(struct rte_eth_dev *dev,
346 : : uint32_t nb_conn_tracks,
347 : : uint16_t nb_queue);
348 : :
349 : : static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
350 : : static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
351 : :
352 : : static int flow_hw_async_create_validate(struct rte_eth_dev *dev,
353 : : const uint32_t queue,
354 : : const struct rte_flow_template_table *table,
355 : : enum rte_flow_table_insertion_type insertion_type,
356 : : const uint32_t rule_index,
357 : : const struct rte_flow_item items[],
358 : : const uint8_t pattern_template_index,
359 : : const struct rte_flow_action actions[],
360 : : const uint8_t action_template_index,
361 : : struct rte_flow_error *error);
362 : : static int flow_hw_async_update_validate(struct rte_eth_dev *dev,
363 : : const uint32_t queue,
364 : : const struct rte_flow_hw *flow,
365 : : const struct rte_flow_action actions[],
366 : : const uint8_t action_template_index,
367 : : struct rte_flow_error *error);
368 : : static int flow_hw_async_destroy_validate(struct rte_eth_dev *dev,
369 : : const uint32_t queue,
370 : : const struct rte_flow_hw *flow,
371 : : struct rte_flow_error *error);
372 : : static bool flow_hw_should_create_nat64_actions(struct mlx5_priv *priv);
373 : :
374 : : const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;
375 : :
376 : : /* DR action flags with different table. */
377 : : static uint32_t mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_MAX]
378 : : [MLX5DR_TABLE_TYPE_MAX] = {
379 : : {
380 : : MLX5DR_ACTION_FLAG_ROOT_RX,
381 : : MLX5DR_ACTION_FLAG_ROOT_TX,
382 : : MLX5DR_ACTION_FLAG_ROOT_FDB,
383 : : MLX5DR_ACTION_FLAG_ROOT_FDB,
384 : : MLX5DR_ACTION_FLAG_ROOT_FDB,
385 : : MLX5DR_ACTION_FLAG_ROOT_FDB,
386 : : },
387 : : {
388 : : MLX5DR_ACTION_FLAG_HWS_RX,
389 : : MLX5DR_ACTION_FLAG_HWS_TX,
390 : : MLX5DR_ACTION_FLAG_HWS_FDB,
391 : : MLX5DR_ACTION_FLAG_HWS_FDB_RX,
392 : : MLX5DR_ACTION_FLAG_HWS_FDB_TX,
393 : : MLX5DR_ACTION_FLAG_HWS_FDB_UNIFIED,
394 : : },
395 : : };
396 : :
397 : : /**
398 : : * Jump table flags.
399 : : * Can jump to FDB_RX table from FDB_RX or UNIFIED tables.
400 : : * Can jump to FDB_TX table from FDB_TX or UNIFIED tables.
401 : : * Can jump to UNIFIED table from all tables.
402 : : */
403 : : static uint32_t mlx5_hw_act_dest_table_flag[MLX5DR_TABLE_TYPE_MAX] = {
404 : : MLX5DR_ACTION_FLAG_HWS_RX,
405 : : MLX5DR_ACTION_FLAG_HWS_TX,
406 : : MLX5DR_ACTION_FLAG_HWS_FDB,
407 : : (MLX5DR_ACTION_FLAG_HWS_FDB_RX | MLX5DR_ACTION_FLAG_HWS_FDB_UNIFIED),
408 : : (MLX5DR_ACTION_FLAG_HWS_FDB_TX | MLX5DR_ACTION_FLAG_HWS_FDB_UNIFIED),
409 : : (MLX5DR_ACTION_FLAG_HWS_FDB_RX | MLX5DR_ACTION_FLAG_HWS_FDB_TX |
410 : : MLX5DR_ACTION_FLAG_HWS_FDB_UNIFIED),
411 : : };
412 : :
413 : : /* Ethernet item spec for promiscuous mode. */
414 : : static const struct rte_flow_item_eth ctrl_rx_eth_promisc_spec = {
415 : : .hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
416 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
417 : : .hdr.ether_type = 0,
418 : : };
419 : : /* Ethernet item mask for promiscuous mode. */
420 : : static const struct rte_flow_item_eth ctrl_rx_eth_promisc_mask = {
421 : : .hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
422 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
423 : : .hdr.ether_type = 0,
424 : : };
425 : :
426 : : /* Ethernet item spec for all multicast mode. */
427 : : static const struct rte_flow_item_eth ctrl_rx_eth_mcast_spec = {
428 : : .hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
429 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
430 : : .hdr.ether_type = 0,
431 : : };
432 : : /* Ethernet item mask for all multicast mode. */
433 : : static const struct rte_flow_item_eth ctrl_rx_eth_mcast_mask = {
434 : : .hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
435 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
436 : : .hdr.ether_type = 0,
437 : : };
438 : :
439 : : /* Ethernet item spec for IPv4 multicast traffic. */
440 : : static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_spec = {
441 : : .hdr.dst_addr.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 },
442 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
443 : : .hdr.ether_type = 0,
444 : : };
445 : : /* Ethernet item mask for IPv4 multicast traffic. */
446 : : static const struct rte_flow_item_eth ctrl_rx_eth_ipv4_mcast_mask = {
447 : : .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 },
448 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
449 : : .hdr.ether_type = 0,
450 : : };
451 : :
452 : : /* Ethernet item spec for IPv6 multicast traffic. */
453 : : static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_spec = {
454 : : .hdr.dst_addr.addr_bytes = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 },
455 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
456 : : .hdr.ether_type = 0,
457 : : };
458 : : /* Ethernet item mask for IPv6 multicast traffic. */
459 : : static const struct rte_flow_item_eth ctrl_rx_eth_ipv6_mcast_mask = {
460 : : .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
461 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
462 : : .hdr.ether_type = 0,
463 : : };
464 : :
465 : : /* Ethernet item mask for unicast traffic. */
466 : : static const struct rte_flow_item_eth ctrl_rx_eth_dmac_mask = {
467 : : .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
468 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
469 : : .hdr.ether_type = 0,
470 : : };
471 : :
472 : : /* Ethernet item spec for broadcast. */
473 : : static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = {
474 : : .hdr.dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
475 : : .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
476 : : .hdr.ether_type = 0,
477 : : };
478 : :
479 : : static inline uint32_t
480 : : flow_hw_q_pending(struct mlx5_priv *priv, uint32_t queue)
481 : : {
482 : 0 : struct mlx5_hw_q *q = &priv->hw_q[queue];
483 : :
484 : : MLX5_ASSERT(q->size >= q->job_idx);
485 : 0 : return (q->size - q->job_idx) + q->ongoing_flow_ops;
486 : : }
487 : :
488 : : static inline void
489 : 0 : flow_hw_q_inc_flow_ops(struct mlx5_priv *priv, uint32_t queue)
490 : : {
491 : 0 : struct mlx5_hw_q *q = &priv->hw_q[queue];
492 : :
493 : 0 : q->ongoing_flow_ops++;
494 : 0 : }
495 : :
496 : : static inline void
497 : : flow_hw_q_dec_flow_ops(struct mlx5_priv *priv, uint32_t queue)
498 : : {
499 : 0 : struct mlx5_hw_q *q = &priv->hw_q[queue];
500 : :
501 : 0 : q->ongoing_flow_ops--;
502 : : }
503 : :
504 : : static inline enum mlx5dr_matcher_insert_mode
505 : : flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type)
506 : : {
507 : 0 : if (insert_type == RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
508 : : return MLX5DR_MATCHER_INSERT_BY_HASH;
509 : : else
510 : 0 : return MLX5DR_MATCHER_INSERT_BY_INDEX;
511 : : }
512 : :
513 : : static inline enum mlx5dr_matcher_distribute_mode
514 : : flow_hw_matcher_distribute_mode_get(enum rte_flow_table_hash_func hash_func)
515 : : {
516 [ # # ]: 0 : if (hash_func == RTE_FLOW_TABLE_HASH_FUNC_LINEAR)
517 : : return MLX5DR_MATCHER_DISTRIBUTE_BY_LINEAR;
518 : : else
519 : 0 : return MLX5DR_MATCHER_DISTRIBUTE_BY_HASH;
520 : : }
521 : :
522 : : /**
523 : : * Set the hash fields according to the @p rss_desc information.
524 : : *
525 : : * @param[in] rss_desc
526 : : * Pointer to the mlx5_flow_rss_desc.
527 : : * @param[out] hash_fields
528 : : * Pointer to the RSS hash fields.
529 : : */
static void
flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
		       uint64_t *hash_fields)
{
	uint64_t fields = 0;
	int rss_inner = 0;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	/* Level >= 2 selects hashing on inner headers of tunneled traffic. */
	if (rss_desc->level >= 2)
		rss_inner = 1;
#endif
	/* L3 selection: IPv4 takes precedence; honor SRC/DST-only limits. */
	if (rss_types & MLX5_IPV4_LAYER_TYPES) {
		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
			fields |= IBV_RX_HASH_SRC_IPV4;
		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
			fields |= IBV_RX_HASH_DST_IPV4;
		else
			fields |= MLX5_IPV4_IBV_RX_HASH;
	} else if (rss_types & MLX5_IPV6_LAYER_TYPES) {
		if (rss_types & RTE_ETH_RSS_L3_SRC_ONLY)
			fields |= IBV_RX_HASH_SRC_IPV6;
		else if (rss_types & RTE_ETH_RSS_L3_DST_ONLY)
			fields |= IBV_RX_HASH_DST_IPV6;
		else
			fields |= MLX5_IPV6_IBV_RX_HASH;
	}
	/* L4 selection: UDP takes precedence over TCP. */
	if (rss_types & RTE_ETH_RSS_UDP) {
		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
			fields |= IBV_RX_HASH_SRC_PORT_UDP;
		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
			fields |= IBV_RX_HASH_DST_PORT_UDP;
		else
			fields |= MLX5_UDP_IBV_RX_HASH;
	} else if (rss_types & RTE_ETH_RSS_TCP) {
		if (rss_types & RTE_ETH_RSS_L4_SRC_ONLY)
			fields |= IBV_RX_HASH_SRC_PORT_TCP;
		else if (rss_types & RTE_ETH_RSS_L4_DST_ONLY)
			fields |= IBV_RX_HASH_DST_PORT_TCP;
		else
			fields |= MLX5_TCP_IBV_RX_HASH;
	}
	if (rss_types & RTE_ETH_RSS_ESP)
		fields |= IBV_RX_HASH_IPSEC_SPI;
	if (rss_inner)
		fields |= IBV_RX_HASH_INNER;
	/* Accumulate into the caller's field set rather than overwriting. */
	*hash_fields |= fields;
}
578 : :
579 : : RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_pmd_mlx5_rss_tir_register, 26.03)
580 : : int
581 : 0 : rte_pmd_mlx5_rss_tir_register(uint16_t port_id,
582 : : const struct rte_flow_action_rss *rss,
583 : : struct rte_pmd_mlx5_rss_devx *devx)
584 : : {
585 : : struct rte_eth_dev *dev;
586 : : struct mlx5_hrxq *hrxq;
587 : 0 : struct mlx5_flow_rss_desc rss_desc = {
588 : : .hws_flags = MLX5DR_ACTION_FLAG_ROOT_RX,
589 : : };
590 : :
591 [ # # ]: 0 : if (rte_eth_dev_is_valid_port(port_id) < 0) {
592 : 0 : DRV_LOG(ERR, "port %u: no Ethernet device", port_id);
593 : 0 : rte_errno = ENODEV;
594 : 0 : return -rte_errno;
595 : : }
596 [ # # # # ]: 0 : if (!rss->queue_num || !rss->queue) {
597 : 0 : DRV_LOG(ERR, "port %u: invalid RSS queues configuration", port_id);
598 : 0 : rte_errno = EINVAL;
599 : 0 : return -rte_errno;
600 : : }
601 [ # # # # ]: 0 : if (rss->key && rss->key_len != MLX5_RSS_HASH_KEY_LEN) {
602 : 0 : DRV_LOG(ERR, "port %u: RSS key length must be %d",
603 : : port_id, MLX5_RSS_HASH_KEY_LEN);
604 : 0 : rte_errno = EINVAL;
605 : 0 : return -rte_errno;
606 : : }
607 [ # # ]: 0 : dev = &rte_eth_devices[port_id];
608 [ # # ]: 0 : if (!mlx5_hws_active(dev)) {
609 : 0 : DRV_LOG(ERR, "port %u: HWS not active", port_id);
610 : 0 : rte_errno = EINVAL;
611 : 0 : return -rte_errno;
612 : : }
613 : 0 : rss_desc.queue_num = rss->queue_num;
614 : 0 : rss_desc.const_q = rss->queue;
615 [ # # ]: 0 : if (rss->queue_num > 1) {
616 [ # # ]: 0 : memcpy(rss_desc.key,
617 : : rss->key ? rss->key : mlx5_rss_hash_default_key,
618 : : MLX5_RSS_HASH_KEY_LEN);
619 : 0 : rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
620 [ # # ]: 0 : rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
621 : 0 : rss_desc.symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
622 : 0 : flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
623 : 0 : mlx5_flow_dv_action_rss_l34_hash_adjust(rss->types,
624 : : &rss_desc.hash_fields);
625 [ # # ]: 0 : if (rss->level > 1) {
626 : 0 : rss_desc.hash_fields |= IBV_RX_HASH_INNER;
627 : 0 : rss_desc.tunnel = 1;
628 : : }
629 : : }
630 : :
631 : 0 : hrxq = mlx5_hrxq_get(dev, &rss_desc);
632 [ # # ]: 0 : if (!hrxq) {
633 : 0 : DRV_LOG(ERR, "port %u: failed to allocate DevX", port_id);
634 : 0 : return -rte_errno;
635 : : }
636 : 0 : devx->destroy_handle = hrxq;
637 : 0 : devx->obj = hrxq->tir->obj;
638 : 0 : devx->id = hrxq->tir->id;
639 : :
640 : 0 : return 0;
641 : : }
642 : :
643 : : RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_pmd_mlx5_rss_tir_unregister, 26.03)
644 : : int
645 : 0 : rte_pmd_mlx5_rss_tir_unregister(uint16_t port_id, void *handle)
646 : : {
647 : : struct rte_eth_dev *dev;
648 : : struct mlx5_hrxq *hrxq = handle;
649 : :
650 [ # # ]: 0 : if (rte_eth_dev_is_valid_port(port_id) < 0) {
651 : 0 : DRV_LOG(ERR, "port %u: no Ethernet device", port_id);
652 : 0 : rte_errno = ENODEV;
653 : 0 : return -rte_errno;
654 : : }
655 : 0 : dev = &rte_eth_devices[port_id];
656 : 0 : mlx5_hrxq_obj_release(dev, hrxq);
657 : 0 : return 0;
658 : : }
659 : :
/**
 * Scan a flow action list and compute its MLX5_FLOW_ACTION_* flag mask.
 *
 * Besides the flags, reports the QUEUE/RSS action, the MARK action, the
 * index of the encap action (or of the preceding RAW_DECAP when the rule
 * carries a decap+encap pair), and the total action count including END.
 *
 * @param[in] actions
 *   Action list terminated by RTE_FLOW_ACTION_TYPE_END.
 * @param[out] qrss
 *   Set to the QUEUE or RSS action when one is present.
 * @param[out] mark
 *   Set to the MARK action when one is present.
 * @param[out] encap_idx
 *   Index of the encap action; set to the END index when none is found.
 * @param[out] act_cnt
 *   Number of actions including the END action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   Action flags bitmask on success, 0 with error set on invalid action.
 */
uint64_t
mlx5_flow_hw_action_flags_get(const struct rte_flow_action actions[],
			      const struct rte_flow_action **qrss,
			      const struct rte_flow_action **mark,
			      int *encap_idx,
			      int *act_cnt,
			      struct rte_flow_error *error)
{
	uint64_t action_flags = 0;
	const struct rte_flow_action *action;
	const struct rte_flow_action_raw_encap *raw_encap;
	int raw_decap_idx = -1; /* Index of the latest RAW_DECAP seen. */
	int action_idx;

	*encap_idx = -1;
	action_idx = 0;
	for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
		int type = (int)action->type;
		switch (type) {
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			/* Classify indirect actions by their embedded type. */
			switch (MLX5_INDIRECT_ACTION_TYPE_GET(action->conf)) {
			case MLX5_INDIRECT_ACTION_TYPE_RSS:
				goto rss;
			case MLX5_INDIRECT_ACTION_TYPE_AGE:
				goto age;
			case MLX5_INDIRECT_ACTION_TYPE_COUNT:
				goto count;
			case MLX5_INDIRECT_ACTION_TYPE_CT:
				goto ct;
			case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
				goto meter;
			default:
				goto error;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			action_flags |= MLX5_FLOW_ACTION_MARK;
			*mark = action;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			action_flags |= MLX5_FLOW_ACTION_JUMP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			*qrss = action;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
rss:
			action_flags |= MLX5_FLOW_ACTION_RSS;
			*qrss = action;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			*encap_idx = action_idx;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			action_flags |= MLX5_FLOW_ACTION_ENCAP;
			raw_encap = action->conf;
			/*
			 * For a large raw encap preceded by a raw decap,
			 * report the pair starting at the decap index.
			 */
			if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
				*encap_idx = raw_decap_idx != -1 ?
					     raw_decap_idx : action_idx;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			action_flags |= MLX5_FLOW_ACTION_DECAP;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			action_flags |= MLX5_FLOW_ACTION_DECAP;
			raw_decap_idx = action_idx;
			break;
		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
			action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
			break;
		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			break;
		case RTE_FLOW_ACTION_TYPE_AGE:
age:
			action_flags |= MLX5_FLOW_ACTION_AGE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
count:
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
ct:
			action_flags |= MLX5_FLOW_ACTION_CT;
			break;
		case RTE_FLOW_ACTION_TYPE_METER_MARK:
meter:
			action_flags |= MLX5_FLOW_ACTION_METER;
			break;
		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
			action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_SAMPLE:
			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
		case RTE_FLOW_ACTION_TYPE_END:
			break;
		default:
			goto error;
		}
		action_idx++;
	}
	/* No encap action found: point at the END action index. */
	if (*encap_idx == -1)
		*encap_idx = action_idx;
	action_idx++; /* The END action. */
	*act_cnt = action_idx;
	return action_flags;
error:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
			   action, "invalid flow action");
	return 0;
}
799 : :
800 : : /**
801 : : * Register destination table DR jump action.
802 : : *
803 : : * @param[in] dev
804 : : * Pointer to the rte_eth_dev structure.
805 : : * @param[in] table_attr
806 : : * Pointer to the flow attributes.
807 : : * @param[in] dest_group
808 : : * The destination group ID.
809 : : * @param[out] error
810 : : * Pointer to error structure.
811 : : *
812 : : * @return
813 : : * Table on success, NULL otherwise and rte_errno is set.
814 : : */
static struct mlx5_hw_jump_action *
flow_hw_jump_action_register(struct rte_eth_dev *dev,
			     const struct mlx5_flow_template_table_cfg *cfg,
			     uint32_t dest_group,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Copy the table attributes; only the group is overridden below. */
	struct rte_flow_attr jattr = cfg->attr.flow_attr;
	uint32_t specialize = cfg->attr.specialize;
	struct mlx5_flow_group *grp;
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &jattr,
		.data2 = &specialize,
	};
	struct mlx5_list_entry *ge;
	uint32_t target_group;

	target_group = dest_group;
	/* Translate the user-visible group id into the internal table group. */
	if (flow_hw_translate_group(dev, cfg, dest_group, &target_group, error))
		return NULL;
	jattr.group = target_group;
	/* Register or reuse the destination group via the shared hash list. */
	ge = mlx5_hlist_register(priv->sh->flow_tbls, target_group, &ctx);
	if (!ge)
		return NULL;
	grp = container_of(ge, struct mlx5_flow_group, entry);
	return &grp->jump;
}
844 : :
845 : : /**
846 : : * Release jump action.
847 : : *
848 : : * @param[in] dev
849 : : * Pointer to the rte_eth_dev structure.
850 : : * @param[in] jump
851 : : * Pointer to the jump action.
852 : : */
853 : :
854 : : static void
855 : : flow_hw_jump_release(struct rte_eth_dev *dev, struct mlx5_hw_jump_action *jump)
856 : : {
857 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
858 : : struct mlx5_flow_group *grp;
859 : :
860 : 0 : grp = container_of(jump, struct mlx5_flow_group, jump);
861 : 0 : mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
862 : 0 : }
863 : :
/**
 * Register queue/RSS action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] hws_flags
 *   DR action flags.
 * @param[in] action
 *   rte flow action (QUEUE or RSS).
 *
 * @return
 *   Rx hash queue object on success, NULL otherwise and rte_errno is set.
 */
/* Get or create an Rx hash queue (TIR) matching a QUEUE or RSS action. */
static inline struct mlx5_hrxq*
flow_hw_tir_action_register(struct rte_eth_dev *dev,
			    uint32_t hws_flags,
			    const struct rte_flow_action *action)
{
	struct mlx5_flow_rss_desc rss_desc = {
		.hws_flags = hws_flags,
	};
	struct mlx5_hrxq *hrxq;

	if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		const struct rte_flow_action_queue *queue = action->conf;

		/* Single-queue case: "RSS" over exactly one Rx queue. */
		rss_desc.const_q = &queue->index;
		rss_desc.queue_num = 1;
	} else {
		const struct rte_flow_action_rss *rss = action->conf;

		rss_desc.queue_num = rss->queue_num;
		rss_desc.const_q = rss->queue;
		/* Fall back to the driver default hash key when none is given. */
		memcpy(rss_desc.key,
		       !rss->key ? mlx5_rss_hash_default_key : rss->key,
		       MLX5_RSS_HASH_KEY_LEN);
		rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
		/* Zero requested types defaults to hashing on IP addresses. */
		rss_desc.types = !rss->types ? RTE_ETH_RSS_IP : rss->types;
		rss_desc.symmetric_hash_function = MLX5_RSS_IS_SYMM(rss->func);
		flow_hw_hashfields_set(&rss_desc, &rss_desc.hash_fields);
		mlx5_flow_dv_action_rss_l34_hash_adjust(rss->types,
							&rss_desc.hash_fields);
		/* Level > 1 requests hashing on inner (tunneled) headers. */
		if (rss->level > 1) {
			rss_desc.hash_fields |= IBV_RX_HASH_INNER;
			rss_desc.tunnel = 1;
		}
	}
	hrxq = mlx5_hrxq_get(dev, &rss_desc);
	return hrxq;
}
918 : :
/*
 * Fill a DR rule action from an ASO connection-tracking object.
 *
 * Returns 0 on success, -1 when the CT object cannot be resolved or,
 * on a non-shared host, when mlx5_aso_ct_available() reports non-zero
 * (presumably "not yet ready on this queue" - TODO confirm against the
 * helper's definition).
 */
static __rte_always_inline int
flow_hw_ct_compile(struct rte_eth_dev *dev,
		   uint32_t queue, uint32_t idx,
		   struct mlx5dr_rule_action *rule_act)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_ct_action *ct;

	ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
	if (!ct || (!priv->shared_host && mlx5_aso_ct_available(priv->sh, queue, ct)))
		return -1;
	rule_act->action = priv->hws_ctpool->dr_action;
	rule_act->aso_ct.offset = ct->offset;
	/* Direction selects initiator vs responder side of the connection. */
	rule_act->aso_ct.direction = ct->is_original ?
			MLX5DR_ACTION_ASO_CT_DIRECTION_INITIATOR :
			MLX5DR_ACTION_ASO_CT_DIRECTION_RESPONDER;
	return 0;
}
937 : :
938 : : static void
939 : : flow_hw_template_destroy_reformat_action(struct mlx5_hw_encap_decap_action *encap_decap)
940 : : {
941 [ # # # # ]: 0 : if (encap_decap->action && !encap_decap->multi_pattern)
942 : 0 : mlx5dr_action_destroy(encap_decap->action);
943 : : }
944 : :
945 : : static void
946 : : flow_hw_template_destroy_mhdr_action(struct mlx5_hw_modify_header_action *mhdr)
947 : : {
948 [ # # # # ]: 0 : if (mhdr->action && !mhdr->multi_pattern)
949 : 0 : mlx5dr_action_destroy(mhdr->action);
950 : : }
951 : :
952 : : /**
953 : : * Destroy DR actions created by action template.
954 : : *
955 : : * For DR actions created during table creation's action translate.
956 : : * Need to destroy the DR action when destroying the table.
957 : : *
958 : : * @param[in] dev
959 : : * Pointer to the rte_eth_dev structure.
960 : : * @param[in] acts
961 : : * Pointer to the template HW steering DR actions.
962 : : */
static void
__flow_hw_actions_release(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Drop MARK refcount; clear the Rx queue mark flag on the last user. */
	if (acts->mark)
		if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
				rte_memory_order_relaxed) - 1))
			mlx5_flow_hw_rxq_flag_set(dev, false);

	/* Unregister the destination group backing the JUMP action. */
	if (acts->jump) {
		struct mlx5_flow_group *grp;

		grp = container_of
			(acts->jump, struct mlx5_flow_group, jump);
		mlx5_hlist_unregister(priv->sh->flow_tbls, &grp->entry);
		acts->jump = NULL;
	}
	/* Release the Rx hash queue reference (QUEUE/RSS action). */
	if (acts->tir) {
		mlx5_hrxq_release(dev, acts->tir->idx);
		acts->tir = NULL;
	}
	/* Destroy the reformat (encap/decap) action unless multi-pattern owned. */
	if (acts->encap_decap) {
		flow_hw_template_destroy_reformat_action(acts->encap_decap);
		mlx5_free(acts->encap_decap);
		acts->encap_decap = NULL;
	}
	/* Destroy the IPv6 extension push/remove action. */
	if (acts->push_remove) {
		if (acts->push_remove->action)
			mlx5dr_action_destroy(acts->push_remove->action);
		mlx5_free(acts->push_remove);
		acts->push_remove = NULL;
	}
	/* Destroy the modify-header action unless multi-pattern owned. */
	if (acts->mhdr) {
		flow_hw_template_destroy_mhdr_action(acts->mhdr);
		mlx5_free(acts->mhdr);
		acts->mhdr = NULL;
	}
	/* Return the shared counter to the HWS counter pool. */
	if (mlx5_hws_cnt_id_valid(acts->cnt_id)) {
		mlx5_hws_cnt_shared_put(priv->hws_cpool, &acts->cnt_id);
		acts->cnt_id = 0;
	}
	/* Free the meter-mark index back to its pool. */
	if (acts->mtr_id) {
		mlx5_ipool_free(priv->hws_mpool->idx_pool, acts->mtr_id);
		acts->mtr_id = 0;
	}
}
1010 : :
1011 : : /**
1012 : : * Release the action data back into the pool without destroy any action.
1013 : : *
1014 : : * @param[in] dev
1015 : : * Pointer to the rte_eth_dev structure.
1016 : : * @param[in] acts
1017 : : * Pointer to the template HW steering DR actions.
1018 : : */
1019 : : static inline void
1020 : 0 : __flow_hw_act_data_flush(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
1021 : : {
1022 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1023 : : struct mlx5_action_construct_data *data;
1024 : :
1025 [ # # ]: 0 : while (!LIST_EMPTY(&acts->act_list)) {
1026 : : data = LIST_FIRST(&acts->act_list);
1027 [ # # ]: 0 : LIST_REMOVE(data, next);
1028 : 0 : mlx5_ipool_free(priv->acts_ipool, data->idx);
1029 : : }
1030 : 0 : }
1031 : :
1032 : : /*
1033 : : * Destroy DR actions created by action template.
1034 : : *
1035 : : * For DR actions created during table creation's action translate.
1036 : : * Need to destroy the DR action when destroying the table.
1037 : : *
1038 : : * @param[in] dev
1039 : : * Pointer to the rte_eth_dev structure.
1040 : : * @param[in] acts
1041 : : * Pointer to the template HW steering DR actions.
1042 : : */
static void
__flow_hw_action_template_destroy(struct rte_eth_dev *dev, struct mlx5_hw_actions *acts)
{
	/* First return dynamic construct data to the pool... */
	__flow_hw_act_data_flush(dev, acts);
	/* ...then destroy/release the DR actions themselves. */
	__flow_hw_actions_release(dev, acts);
}
1049 : :
/**
 * Allocate and initialize a dynamic action construct data node.
 *
 * @param[in] priv
 *   Pointer to the port private data structure.
 * @param[in] type
 *   Action type.
 * @param[in] action_src
 *   Offset of source rte flow action.
 * @param[in] action_dst
 *   Offset of destination DR action.
 *
 * @return
 *   Pointer to the allocated node on success, NULL otherwise.
 */
1067 : : static __rte_always_inline struct mlx5_action_construct_data *
1068 : : __flow_hw_act_data_alloc(struct mlx5_priv *priv,
1069 : : enum rte_flow_action_type type,
1070 : : uint16_t action_src,
1071 : : uint16_t action_dst)
1072 : : {
1073 : : struct mlx5_action_construct_data *act_data;
1074 : 0 : uint32_t idx = 0;
1075 : :
1076 : 0 : act_data = mlx5_ipool_zmalloc(priv->acts_ipool, &idx);
1077 [ # # # # : 0 : if (!act_data)
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # ]
1078 : : return NULL;
1079 : 0 : act_data->idx = idx;
1080 : 0 : act_data->type = type;
1081 : 0 : act_data->action_src = action_src;
1082 : 0 : act_data->action_dst = action_dst;
1083 : : return act_data;
1084 : : }
1085 : :
1086 : : /**
1087 : : * Append dynamic action to the dynamic action list.
1088 : : *
1089 : : * @param[in] priv
1090 : : * Pointer to the port private data structure.
1091 : : * @param[in] acts
1092 : : * Pointer to the template HW steering DR actions.
1093 : : * @param[in] type
1094 : : * Action type.
1095 : : * @param[in] action_src
1096 : : * Offset of source rte flow action.
1097 : : * @param[in] action_dst
1098 : : * Offset of destination DR action.
1099 : : *
1100 : : * @return
1101 : : * 0 on success, negative value otherwise and rte_errno is set.
1102 : : */
1103 : : static __rte_always_inline int
1104 : : __flow_hw_act_data_general_append(struct mlx5_priv *priv,
1105 : : struct mlx5_hw_actions *acts,
1106 : : enum rte_flow_action_type type,
1107 : : uint16_t action_src,
1108 : : uint16_t action_dst)
1109 : : {
1110 : : struct mlx5_action_construct_data *act_data;
1111 : :
1112 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1113 : : if (!act_data)
1114 : : return -1;
1115 [ # # # # : 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
# # # # #
# # # # #
# # # # #
# # # # #
# # ]
1116 : : return 0;
1117 : : }
1118 : :
1119 : : static __rte_always_inline int
1120 : : __flow_hw_act_data_indirect_append(struct mlx5_priv *priv,
1121 : : struct mlx5_hw_actions *acts,
1122 : : enum rte_flow_action_type type,
1123 : : enum rte_flow_action_type mask_type,
1124 : : uint16_t action_src,
1125 : : uint16_t action_dst)
1126 : : {
1127 : : struct mlx5_action_construct_data *act_data;
1128 : :
1129 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1130 : : if (!act_data)
1131 : : return -1;
1132 : 0 : act_data->indirect.expected_type = mask_type;
1133 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1134 : : return 0;
1135 : : }
1136 : :
1137 : : static __rte_always_inline int
1138 : : flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
1139 : : struct mlx5_hw_actions *acts,
1140 : : enum rte_flow_action_type type,
1141 : : uint16_t action_src, uint16_t action_dst,
1142 : : indirect_list_callback_t cb)
1143 : : {
1144 : : struct mlx5_action_construct_data *act_data;
1145 : :
1146 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1147 : : if (!act_data)
1148 : 0 : return -1;
1149 : 0 : act_data->indirect_list_cb = cb;
1150 [ # # # # : 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
# # ]
1151 : 0 : return 0;
1152 : : }
1153 : : /**
1154 : : * Append dynamic encap action to the dynamic action list.
1155 : : *
1156 : : * @param[in] priv
1157 : : * Pointer to the port private data structure.
1158 : : * @param[in] acts
1159 : : * Pointer to the template HW steering DR actions.
1160 : : * @param[in] type
1161 : : * Action type.
1162 : : * @param[in] action_src
1163 : : * Offset of source rte flow action.
1164 : : * @param[in] action_dst
1165 : : * Offset of destination DR action.
1166 : : * @param[in] len
1167 : : * Length of the data to be updated.
1168 : : *
1169 : : * @return
1170 : : * 0 on success, negative value otherwise and rte_errno is set.
1171 : : */
1172 : : static __rte_always_inline int
1173 : : __flow_hw_act_data_encap_append(struct mlx5_priv *priv,
1174 : : struct mlx5_hw_actions *acts,
1175 : : enum rte_flow_action_type type,
1176 : : uint16_t action_src,
1177 : : uint16_t action_dst,
1178 : : uint16_t len)
1179 : : {
1180 : : struct mlx5_action_construct_data *act_data;
1181 : :
1182 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1183 : : if (!act_data)
1184 : : return -1;
1185 : 0 : act_data->encap.len = len;
1186 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1187 : : return 0;
1188 : : }
1189 : :
1190 : : /**
1191 : : * Append dynamic push action to the dynamic action list.
1192 : : *
1193 : : * @param[in] dev
1194 : : * Pointer to the port.
1195 : : * @param[in] acts
1196 : : * Pointer to the template HW steering DR actions.
1197 : : * @param[in] type
1198 : : * Action type.
1199 : : * @param[in] action_src
1200 : : * Offset of source rte flow action.
1201 : : * @param[in] action_dst
1202 : : * Offset of destination DR action.
1203 : : * @param[in] len
1204 : : * Length of the data to be updated.
1205 : : *
1206 : : * @return
1207 : : * Data pointer on success, NULL otherwise and rte_errno is set.
1208 : : */
1209 : : static __rte_always_inline void *
1210 : : __flow_hw_act_data_push_append(struct rte_eth_dev *dev,
1211 : : struct mlx5_hw_actions *acts,
1212 : : enum rte_flow_action_type type,
1213 : : uint16_t action_src,
1214 : : uint16_t action_dst,
1215 : : uint16_t len)
1216 : : {
1217 : : struct mlx5_action_construct_data *act_data;
1218 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1219 : :
1220 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1221 : : if (!act_data)
1222 : : return NULL;
1223 : 0 : act_data->ipv6_ext.len = len;
1224 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1225 : : return act_data;
1226 : : }
1227 : :
1228 : : static __rte_always_inline int
1229 : : __flow_hw_act_data_hdr_modify_append(struct mlx5_priv *priv,
1230 : : struct mlx5_hw_actions *acts,
1231 : : enum rte_flow_action_type type,
1232 : : uint16_t action_src,
1233 : : uint16_t action_dst,
1234 : : const struct rte_flow_action_modify_field *mf,
1235 : : uint16_t mhdr_cmds_off,
1236 : : uint16_t mhdr_cmds_end,
1237 : : bool shared,
1238 : : struct field_modify_info *field,
1239 : : struct field_modify_info *dcopy,
1240 : : uint32_t *mask)
1241 : : {
1242 : : struct mlx5_action_construct_data *act_data;
1243 : :
1244 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1245 : : if (!act_data)
1246 : : return -1;
1247 : 0 : act_data->modify_header.action = *mf;
1248 : 0 : act_data->modify_header.mhdr_cmds_off = mhdr_cmds_off;
1249 : 0 : act_data->modify_header.mhdr_cmds_end = mhdr_cmds_end;
1250 : 0 : act_data->modify_header.shared = shared;
1251 [ # # ]: 0 : rte_memcpy(act_data->modify_header.field, field,
1252 : : sizeof(*field) * MLX5_ACT_MAX_MOD_FIELDS);
1253 [ # # ]: 0 : rte_memcpy(act_data->modify_header.dcopy, dcopy,
1254 : : sizeof(*dcopy) * MLX5_ACT_MAX_MOD_FIELDS);
1255 [ # # ]: 0 : rte_memcpy(act_data->modify_header.mask, mask,
1256 : : sizeof(*mask) * MLX5_ACT_MAX_MOD_FIELDS);
1257 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1258 : : return 0;
1259 : : }
1260 : :
1261 : : /**
1262 : : * Append shared RSS action to the dynamic action list.
1263 : : *
1264 : : * @param[in] priv
1265 : : * Pointer to the port private data structure.
1266 : : * @param[in] acts
1267 : : * Pointer to the template HW steering DR actions.
1268 : : * @param[in] type
1269 : : * Action type.
1270 : : * @param[in] action_src
1271 : : * Offset of source rte flow action.
1272 : : * @param[in] action_dst
1273 : : * Offset of destination DR action.
1274 : : * @param[in] idx
1275 : : * Shared RSS index.
1276 : : * @param[in] rss
1277 : : * Pointer to the shared RSS info.
1278 : : *
1279 : : * @return
1280 : : * 0 on success, negative value otherwise and rte_errno is set.
1281 : : */
1282 : : static __rte_always_inline int
1283 : : __flow_hw_act_data_shared_rss_append(struct mlx5_priv *priv,
1284 : : struct mlx5_hw_actions *acts,
1285 : : enum rte_flow_action_type type,
1286 : : uint16_t action_src,
1287 : : uint16_t action_dst,
1288 : : uint32_t idx,
1289 : : struct mlx5_shared_action_rss *rss)
1290 : : {
1291 : : struct mlx5_action_construct_data *act_data;
1292 : :
1293 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1294 : : if (!act_data)
1295 : : return -1;
1296 : 0 : act_data->shared_rss.level = rss->origin.level;
1297 [ # # ]: 0 : act_data->shared_rss.types = !rss->origin.types ? RTE_ETH_RSS_IP :
1298 : : rss->origin.types;
1299 : 0 : act_data->shared_rss.idx = idx;
1300 : 0 : act_data->shared_rss.symmetric_hash_function =
1301 : 0 : MLX5_RSS_IS_SYMM(rss->origin.func);
1302 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1303 : : return 0;
1304 : : }
1305 : :
1306 : : /**
1307 : : * Append shared counter action to the dynamic action list.
1308 : : *
1309 : : * @param[in] priv
1310 : : * Pointer to the port private data structure.
1311 : : * @param[in] acts
1312 : : * Pointer to the template HW steering DR actions.
1313 : : * @param[in] type
1314 : : * Action type.
1315 : : * @param[in] action_src
1316 : : * Offset of source rte flow action.
1317 : : * @param[in] action_dst
1318 : : * Offset of destination DR action.
1319 : : * @param[in] cnt_id
1320 : : * Shared counter id.
1321 : : *
1322 : : * @return
1323 : : * 0 on success, negative value otherwise and rte_errno is set.
1324 : : */
1325 : : static __rte_always_inline int
1326 : : __flow_hw_act_data_shared_cnt_append(struct mlx5_priv *priv,
1327 : : struct mlx5_hw_actions *acts,
1328 : : enum rte_flow_action_type type,
1329 : : uint16_t action_src,
1330 : : uint16_t action_dst,
1331 : : cnt_id_t cnt_id)
1332 : : {
1333 : : struct mlx5_action_construct_data *act_data;
1334 : :
1335 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1336 : : if (!act_data)
1337 : : return -1;
1338 : : act_data->type = type;
1339 : 0 : act_data->shared_counter.id = cnt_id;
1340 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1341 : : return 0;
1342 : : }
1343 : :
1344 : : /**
1345 : : * Append shared meter_mark action to the dynamic action list.
1346 : : *
1347 : : * @param[in] priv
1348 : : * Pointer to the port private data structure.
1349 : : * @param[in] acts
1350 : : * Pointer to the template HW steering DR actions.
1351 : : * @param[in] type
1352 : : * Action type.
1353 : : * @param[in] action_src
1354 : : * Offset of source rte flow action.
1355 : : * @param[in] action_dst
1356 : : * Offset of destination DR action.
1357 : : * @param[in] mtr_id
1358 : : * Shared meter id.
1359 : : *
1360 : : * @return
1361 : : * 0 on success, negative value otherwise and rte_errno is set.
1362 : : */
1363 : : static __rte_always_inline int
1364 : : __flow_hw_act_data_shared_mtr_append(struct mlx5_priv *priv,
1365 : : struct mlx5_hw_actions *acts,
1366 : : enum rte_flow_action_type type,
1367 : : uint16_t action_src,
1368 : : uint16_t action_dst,
1369 : : cnt_id_t mtr_id)
1370 : : { struct mlx5_action_construct_data *act_data;
1371 : :
1372 : : act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
1373 : : if (!act_data)
1374 : : return -1;
1375 : : act_data->type = type;
1376 : 0 : act_data->shared_meter.id = mtr_id;
1377 [ # # ]: 0 : LIST_INSERT_HEAD(&acts->act_list, act_data, next);
1378 : : return 0;
1379 : : }
1380 : :
1381 : : /**
1382 : : * Translate shared indirect action.
1383 : : *
1384 : : * @param[in] dev
1385 : : * Pointer to the rte_eth_dev data structure.
1386 : : * @param[in] action
1387 : : * Pointer to the shared indirect rte_flow action.
1388 : : * @param[in] acts
1389 : : * Pointer to the template HW steering DR actions.
1390 : : * @param[in] action_src
1391 : : * Offset of source rte flow action.
1392 : : * @param[in] action_dst
1393 : : * Offset of destination DR action.
1394 : : *
1395 : : * @return
1396 : : * 0 on success, negative value otherwise and rte_errno is set.
1397 : : */
static __rte_always_inline int
flow_hw_shared_action_translate(struct rte_eth_dev *dev,
				const struct rte_flow_action *action,
				struct mlx5_hw_actions *acts,
				uint16_t action_src,
				uint16_t action_dst,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss;
	/* Indirect handles encode type in the high bits, index in the low. */
	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	uint32_t idx = act_idx &
		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);

	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		/* Resolve the shared RSS object and queue a dynamic node. */
		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
		if (!shared_rss || __flow_hw_act_data_shared_rss_append
		    (priv, acts,
		    (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS,
		    action_src, action_dst, idx, shared_rss)) {
			DRV_LOG(ERR, "port %u Indirect RSS action (handle %p) translate failed",
				dev->data->port_id, action->conf);
			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
						  action, "Indirect RSS action translate failed");
		}
		break;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		/* Counter: queue a dynamic node carrying the full handle. */
		if (__flow_hw_act_data_shared_cnt_append(priv, acts,
			(enum rte_flow_action_type)
			MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
			action_src, action_dst, act_idx)) {
			DRV_LOG(ERR,
				"port %u Indirect count action (handle %p) "
				"translate failed",
				dev->data->port_id, action->conf);
			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "Indirect count action translate failed");
		}
		break;
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		/* Conntrack: compile directly into the DR rule action. */
		if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE,
				       idx, &acts->rule_acts[action_dst])) {
			DRV_LOG(ERR, "port %u Indirect CT action (handle %p) translate failed",
				dev->data->port_id, action->conf);
			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
						  action, "Indirect CT action translate failed");
		}
		break;
	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
		/* Meter mark: queue a dynamic node with the meter index. */
		if (__flow_hw_act_data_shared_mtr_append(priv, acts,
			(enum rte_flow_action_type)
			MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
			action_src, action_dst, idx)) {
			DRV_LOG(ERR,
				"port %u Indirect meter mark action (handle %p) "
				"translate failed",
				dev->data->port_id, action->conf);
			return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "Indirect meter mark action translate failed");
		}
		break;
	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
		/* Quota: construct the DR rule action in place. */
		flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
		break;
	default:
		DRV_LOG(ERR, "Unsupported shared action type: %d", type);
		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "Unsupported shared action type");
	}
	return 0;
}
1474 : :
1475 : : static __rte_always_inline bool
1476 : : flow_hw_action_modify_field_is_shared(const struct rte_flow_action *action,
1477 : : const struct rte_flow_action *mask)
1478 : : {
1479 : : const struct rte_flow_action_modify_field *v = action->conf;
1480 : 0 : const struct rte_flow_action_modify_field *m = mask->conf;
1481 : :
1482 : 0 : if (v->src.field == RTE_FLOW_FIELD_VALUE) {
1483 : : uint32_t j;
1484 : :
1485 [ # # ]: 0 : for (j = 0; j < RTE_DIM(m->src.value); ++j) {
1486 : : /*
1487 : : * Immediate value is considered to be masked
1488 : : * (and thus shared by all flow rules), if mask
1489 : : * is non-zero. Partial mask over immediate value
1490 : : * is not allowed.
1491 : : */
1492 [ # # ]: 0 : if (m->src.value[j])
1493 : : return true;
1494 : : }
1495 : : return false;
1496 : : }
1497 [ # # ]: 0 : if (v->src.field == RTE_FLOW_FIELD_POINTER)
1498 : 0 : return m->src.pvalue != NULL;
1499 : : /*
1500 : : * Source field types other than VALUE and
1501 : : * POINTER are always shared.
1502 : : */
1503 : : return true;
1504 : : }
1505 : :
/*
 * Decide whether a NOP must be inserted before appending a new
 * modify-header command: HW cannot apply two consecutive commands
 * touching the same field within one pattern, so a NOP separates them.
 */
static __rte_always_inline bool
flow_hw_should_insert_nop(const struct mlx5_hw_modify_header_action *mhdr,
			  const struct mlx5_modification_cmd *cmd,
			  const struct rte_flow_attr *attr)
{
	struct mlx5_modification_cmd last_cmd = { { 0 } };
	struct mlx5_modification_cmd new_cmd = { { 0 } };
	const uint32_t cmds_num = mhdr->mhdr_cmds_num;
	unsigned int last_type;
	bool should_insert = false;

	/*
	 * Modify header action list does not require NOPs in root table,
	 * because different type of underlying object is used:
	 * - in root table - MODIFY_HEADER_CONTEXT (does not require NOPs),
	 * - in non-root - either inline modify action or based on Modify
	 *   Header Pattern (which requires NOPs).
	 */
	if (attr->group == 0)
		return false;
	/* Nothing to conflict with when the list is still empty. */
	if (cmds_num == 0)
		return false;
	/* Commands are stored big-endian; convert both for field compares. */
	last_cmd = *(&mhdr->mhdr_cmds[cmds_num - 1]);
	last_cmd.data0 = rte_be_to_cpu_32(last_cmd.data0);
	last_cmd.data1 = rte_be_to_cpu_32(last_cmd.data1);
	last_type = last_cmd.action_type;
	new_cmd = *cmd;
	new_cmd.data0 = rte_be_to_cpu_32(new_cmd.data0);
	new_cmd.data1 = rte_be_to_cpu_32(new_cmd.data1);
	switch (new_cmd.action_type) {
	case MLX5_MODIFICATION_TYPE_SET:
	case MLX5_MODIFICATION_TYPE_ADD:
		/* SET/ADD write new_cmd.field - conflict if last wrote it. */
		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
		    last_type == MLX5_MODIFICATION_TYPE_ADD)
			should_insert = new_cmd.field == last_cmd.field;
		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
			should_insert = new_cmd.field == last_cmd.dst_field;
		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
			should_insert = false;
		else
			MLX5_ASSERT(false); /* Other types are not supported. */
		break;
	case MLX5_MODIFICATION_TYPE_COPY:
	case MLX5_MODIFICATION_TYPE_ADD_FIELD:
		/* COPY/ADD_FIELD read .field and write .dst_field - check both. */
		if (last_type == MLX5_MODIFICATION_TYPE_SET ||
		    last_type == MLX5_MODIFICATION_TYPE_ADD)
			should_insert = (new_cmd.field == last_cmd.field ||
					 new_cmd.dst_field == last_cmd.field);
		else if (last_type == MLX5_MODIFICATION_TYPE_COPY ||
			 last_type == MLX5_MODIFICATION_TYPE_ADD_FIELD)
			should_insert = (new_cmd.field == last_cmd.dst_field ||
					 new_cmd.dst_field == last_cmd.dst_field);
		else if (last_type == MLX5_MODIFICATION_TYPE_NOP)
			should_insert = false;
		else
			MLX5_ASSERT(false); /* Other types are not supported. */
		break;
	default:
		/* Other action types should be rejected on AT validation. */
		MLX5_ASSERT(false);
		break;
	}
	return should_insert;
}
1571 : :
/* Append a NOP command to the modify-header command array. */
static __rte_always_inline int
flow_hw_mhdr_cmd_nop_append(struct mlx5_hw_modify_header_action *mhdr)
{
	struct mlx5_modification_cmd *nop;
	uint32_t num = mhdr->mhdr_cmds_num;

	/*
	 * NOTE(review): ">=" stops one entry early, so the last slot of
	 * mhdr_cmds[MLX5_MHDR_MAX_CMD] is never used - confirm whether this
	 * reserve is intentional or an off-by-one.
	 */
	if (num + 1 >= MLX5_MHDR_MAX_CMD)
		return -ENOMEM;
	nop = mhdr->mhdr_cmds + num;
	nop->data0 = 0;
	nop->action_type = MLX5_MODIFICATION_TYPE_NOP;
	/* Commands are stored big-endian. */
	nop->data0 = rte_cpu_to_be_32(nop->data0);
	nop->data1 = 0;
	mhdr->mhdr_cmds_num = num + 1;
	return 0;
}
1588 : :
/* Append a ready-made command to the modify-header command array. */
static __rte_always_inline int
flow_hw_mhdr_cmd_append(struct mlx5_hw_modify_header_action *mhdr,
			struct mlx5_modification_cmd *cmd)
{
	uint32_t num = mhdr->mhdr_cmds_num;

	/*
	 * NOTE(review): same ">=" boundary as flow_hw_mhdr_cmd_nop_append();
	 * the final array slot is never used - confirm intent.
	 */
	if (num + 1 >= MLX5_MHDR_MAX_CMD)
		return -ENOMEM;
	mhdr->mhdr_cmds[num] = *cmd;
	mhdr->mhdr_cmds_num = num + 1;
	return 0;
}
1601 : :
1602 : : static __rte_always_inline int
1603 : : flow_hw_converted_mhdr_cmds_append(struct mlx5_hw_modify_header_action *mhdr,
1604 : : struct mlx5_flow_dv_modify_hdr_resource *resource,
1605 : : const struct rte_flow_attr *attr)
1606 : : {
1607 : : uint32_t idx;
1608 : : int ret;
1609 : :
1610 [ # # ]: 0 : for (idx = 0; idx < resource->actions_num; ++idx) {
1611 : : struct mlx5_modification_cmd *src = &resource->actions[idx];
1612 : :
1613 [ # # ]: 0 : if (flow_hw_should_insert_nop(mhdr, src, attr)) {
1614 : : ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1615 : : if (ret)
1616 : : return ret;
1617 : : }
1618 : : ret = flow_hw_mhdr_cmd_append(mhdr, src);
1619 : : if (ret)
1620 : : return ret;
1621 : : }
1622 : : return 0;
1623 : : }
1624 : :
1625 : : static __rte_always_inline void
1626 : : flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
1627 : : struct rte_flow_actions_template *at)
1628 : : {
1629 : : memset(mhdr, 0, sizeof(*mhdr));
1630 : : /* Modify header action without any commands is shared by default. */
1631 : 0 : mhdr->shared = true;
1632 : 0 : mhdr->pos = at->mhdr_off;
1633 : : }
1634 : :
1635 : : static __rte_always_inline int
1636 : : flow_hw_modify_field_compile(struct rte_eth_dev *dev,
1637 : : const struct rte_flow_attr *attr,
1638 : : const struct rte_flow_action *action, /* Current action from AT. */
1639 : : const struct rte_flow_action *action_mask, /* Current mask from AT. */
1640 : : struct mlx5_hw_actions *acts,
1641 : : struct mlx5_hw_modify_header_action *mhdr,
1642 : : uint16_t src_pos,
1643 : : struct rte_flow_error *error)
1644 : : {
1645 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1646 : 0 : const struct rte_flow_action_modify_field *conf = action->conf;
1647 : : union {
1648 : : struct mlx5_flow_dv_modify_hdr_resource resource;
1649 : : uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
1650 : : sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
1651 : : } dummy;
1652 : : struct mlx5_flow_dv_modify_hdr_resource *resource;
1653 : 0 : struct rte_flow_item item = {
1654 : : .spec = NULL,
1655 : : .mask = NULL
1656 : : };
1657 : 0 : struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS] = {
1658 : : {0, 0, MLX5_MODI_OUT_NONE} };
1659 : 0 : struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS] = {
1660 : : {0, 0, MLX5_MODI_OUT_NONE} };
1661 : 0 : uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS] = { 0 };
1662 : 0 : uint32_t type, value = 0;
1663 : : uint16_t cmds_start, cmds_end;
1664 : : bool shared;
1665 : : int ret;
1666 : :
1667 : : /*
1668 : : * Modify header action is shared if previous modify_field actions
1669 : : * are shared and currently compiled action is shared.
1670 : : */
1671 : : shared = flow_hw_action_modify_field_is_shared(action, action_mask);
1672 : 0 : mhdr->shared &= shared;
1673 [ # # ]: 0 : if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
1674 : : conf->src.field == RTE_FLOW_FIELD_VALUE) {
1675 [ # # ]: 0 : type = conf->operation == RTE_FLOW_MODIFY_SET ? MLX5_MODIFICATION_TYPE_SET :
1676 : : MLX5_MODIFICATION_TYPE_ADD;
1677 : : /* For SET/ADD fill the destination field (field) first. */
1678 : 0 : mlx5_flow_field_id_to_modify_info(&conf->dst, field, mask,
1679 : 0 : conf->width, dev,
1680 : : attr, error);
1681 : 0 : item.spec = conf->src.field == RTE_FLOW_FIELD_POINTER ?
1682 [ # # ]: 0 : (void *)(uintptr_t)conf->src.pvalue :
1683 : : (void *)(uintptr_t)&conf->src.value;
1684 [ # # ]: 0 : if (conf->dst.field == RTE_FLOW_FIELD_META ||
1685 : : conf->dst.field == RTE_FLOW_FIELD_TAG ||
1686 : : conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
1687 : 0 : conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
1688 : : uint8_t tag_index = flow_tag_index_get(&conf->dst);
1689 : :
1690 : 0 : value = *(const unaligned_uint32_t *)item.spec;
1691 [ # # # # ]: 0 : if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
1692 : : tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
1693 [ # # ]: 0 : value = rte_cpu_to_be_32(value << 16);
1694 : : else
1695 [ # # ]: 0 : value = rte_cpu_to_be_32(value);
1696 : 0 : item.spec = &value;
1697 : : } else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
1698 : : conf->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
1699 : : /*
1700 : : * Both QFI and Geneve option type are passed as an uint8_t integer,
1701 : : * but it is accessed through a 2nd least significant byte of a 32-bit
1702 : : * field in modify header command.
1703 : : */
1704 : 0 : value = *(const uint8_t *)item.spec;
1705 [ # # ]: 0 : value = rte_cpu_to_be_32(value << 8);
1706 : 0 : item.spec = &value;
1707 : : } else if (conf->dst.field == RTE_FLOW_FIELD_VXLAN_LAST_RSVD) {
1708 : 0 : value = *(const uint8_t *)item.spec << 24;
1709 [ # # ]: 0 : value = rte_cpu_to_be_32(value);
1710 : 0 : item.spec = &value;
1711 : 0 : } else if (conf->dst.field == RTE_FLOW_FIELD_IPV6_DSCP &&
1712 [ # # # # ]: 0 : !(mask[0] & MLX5_IPV6_HDR_ECN_MASK) &&
1713 [ # # ]: 0 : mlx5_dv_modify_ipv6_traffic_class_supported(dev->data->dev_private)) {
1714 : 0 : value = *(const unaligned_uint32_t *)item.spec << MLX5_IPV6_HDR_DSCP_SHIFT;
1715 : 0 : item.spec = &value;
1716 : : }
1717 : : } else {
1718 : 0 : type = conf->operation == RTE_FLOW_MODIFY_SET ?
1719 [ # # ]: 0 : MLX5_MODIFICATION_TYPE_COPY : MLX5_MODIFICATION_TYPE_ADD_FIELD;
1720 : : /* For COPY fill the destination field (dcopy) without mask. */
1721 : 0 : mlx5_flow_field_id_to_modify_info(&conf->dst, dcopy, NULL,
1722 : 0 : conf->width, dev,
1723 : : attr, error);
1724 : : /* Then construct the source field (field) with mask. */
1725 : 0 : mlx5_flow_field_id_to_modify_info(&conf->src, field, mask,
1726 : 0 : conf->width, dev,
1727 : : attr, error);
1728 : : }
1729 : 0 : item.mask = &mask;
1730 : : memset(&dummy, 0, sizeof(dummy));
1731 : : resource = &dummy.resource;
1732 : 0 : ret = mlx5_flow_dv_convert_modify_action(&item, field, dcopy, resource, type, error);
1733 [ # # ]: 0 : if (ret)
1734 : : return ret;
1735 : : MLX5_ASSERT(resource->actions_num > 0);
1736 : : /*
1737 : : * If previous modify field action collide with this one, then insert NOP command.
1738 : : * This NOP command will not be a part of action's command range used to update commands
1739 : : * on rule creation.
1740 : : */
1741 [ # # ]: 0 : if (flow_hw_should_insert_nop(mhdr, &resource->actions[0], attr)) {
1742 : : ret = flow_hw_mhdr_cmd_nop_append(mhdr);
1743 : : if (ret)
1744 : 0 : return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1745 : : NULL, "too many modify field operations specified");
1746 : : }
1747 : 0 : cmds_start = mhdr->mhdr_cmds_num;
1748 : : ret = flow_hw_converted_mhdr_cmds_append(mhdr, resource, attr);
1749 [ # # ]: 0 : if (ret)
1750 : 0 : return rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1751 : : NULL, "too many modify field operations specified");
1752 : :
1753 : 0 : cmds_end = mhdr->mhdr_cmds_num;
1754 [ # # ]: 0 : if (shared)
1755 : : return 0;
1756 : : ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
1757 : 0 : src_pos, mhdr->pos, conf,
1758 : : cmds_start, cmds_end, shared,
1759 : : field, dcopy, mask);
1760 : : if (ret)
1761 : 0 : return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1762 : : NULL, "not enough memory to store modify field metadata");
1763 : : return 0;
1764 : : }
1765 : :
1766 : : static uint32_t
1767 : 0 : flow_hw_count_nop_modify_field(struct mlx5_hw_modify_header_action *mhdr)
1768 : : {
1769 : : uint32_t i;
1770 : : uint32_t nops = 0;
1771 : :
1772 [ # # ]: 0 : for (i = 0; i < mhdr->mhdr_cmds_num; ++i) {
1773 : 0 : struct mlx5_modification_cmd cmd = mhdr->mhdr_cmds[i];
1774 : :
1775 [ # # ]: 0 : cmd.data0 = rte_be_to_cpu_32(cmd.data0);
1776 [ # # ]: 0 : if (cmd.action_type == MLX5_MODIFICATION_TYPE_NOP)
1777 : 0 : ++nops;
1778 : : }
1779 : 0 : return nops;
1780 : : }
1781 : :
1782 : : static int
1783 : 0 : flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
1784 : : const struct mlx5_flow_template_table_cfg *cfg,
1785 : : struct mlx5_hw_modify_header_action *mhdr,
1786 : : struct rte_flow_error *error)
1787 : : {
1788 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1789 : 0 : struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
1790 : :
1791 : : /*
1792 : : * Header modify pattern length limitation is only valid for HWS groups, i.e. groups > 0.
1793 : : * In group 0, MODIFY_FIELD actions are handled with header modify actions
1794 : : * managed by rdma-core.
1795 : : */
1796 [ # # ]: 0 : if (cfg->attr.flow_attr.group != 0 &&
1797 [ # # ]: 0 : mhdr->mhdr_cmds_num > hca_attr->max_header_modify_pattern_length) {
1798 : 0 : uint32_t nops = flow_hw_count_nop_modify_field(mhdr);
1799 : :
1800 : 0 : DRV_LOG(ERR, "Too many modify header commands generated from "
1801 : : "MODIFY_FIELD actions. "
1802 : : "Generated HW commands = %u (amount of NOP commands = %u). "
1803 : : "Maximum supported = %u.",
1804 : : mhdr->mhdr_cmds_num, nops,
1805 : : hca_attr->max_header_modify_pattern_length);
1806 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1807 : : "Number of MODIFY_FIELD actions exceeds maximum "
1808 : : "supported limit of actions");
1809 : : }
1810 : : return 0;
1811 : : }
1812 : :
1813 : : static int
1814 : 0 : flow_hw_represented_port_compile(struct rte_eth_dev *dev,
1815 : : const struct rte_flow_attr *attr,
1816 : : const struct rte_flow_action *action,
1817 : : const struct rte_flow_action *action_mask,
1818 : : struct mlx5_hw_actions *acts,
1819 : : uint16_t action_src, uint16_t action_dst,
1820 : : struct rte_flow_error *error)
1821 : : {
1822 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1823 : 0 : const struct rte_flow_action_ethdev *v = action->conf;
1824 : 0 : const struct rte_flow_action_ethdev *m = action_mask->conf;
1825 : : int ret;
1826 : :
1827 [ # # ]: 0 : if (!attr->group)
1828 : 0 : return rte_flow_error_set(error, EINVAL,
1829 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1830 : : "represented_port action cannot"
1831 : : " be used on group 0");
1832 [ # # ]: 0 : if (!attr->transfer)
1833 : 0 : return rte_flow_error_set(error, EINVAL,
1834 : : RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1835 : : NULL,
1836 : : "represented_port action requires"
1837 : : " transfer attribute");
1838 [ # # ]: 0 : if (attr->ingress || attr->egress)
1839 : 0 : return rte_flow_error_set(error, EINVAL,
1840 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1841 : : "represented_port action cannot"
1842 : : " be used with direction attributes");
1843 [ # # ]: 0 : if (!priv->master)
1844 : 0 : return rte_flow_error_set(error, EINVAL,
1845 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1846 : : "represented_port action must"
1847 : : " be used on proxy port");
1848 [ # # # # ]: 0 : if (m && !!m->port_id) {
1849 : : struct mlx5_priv *port_priv;
1850 : :
1851 [ # # ]: 0 : if (!v)
1852 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1853 : : action, "port index was not provided");
1854 : 0 : port_priv = mlx5_port_to_eswitch_info(v->port_id, false);
1855 [ # # ]: 0 : if (port_priv == NULL)
1856 : 0 : return rte_flow_error_set
1857 : : (error, EINVAL,
1858 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1859 : : "port does not exist or unable to"
1860 : : " obtain E-Switch info for port");
1861 : : MLX5_ASSERT(priv->hw_vport != NULL);
1862 [ # # ]: 0 : if (priv->hw_vport[v->port_id]) {
1863 : 0 : acts->rule_acts[action_dst].action =
1864 : : priv->hw_vport[v->port_id];
1865 : : } else {
1866 : 0 : return rte_flow_error_set
1867 : : (error, EINVAL,
1868 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1869 : : "cannot use represented_port action"
1870 : : " with this port");
1871 : : }
1872 : : } else {
1873 : : ret = __flow_hw_act_data_general_append
1874 : 0 : (priv, acts, action->type,
1875 : : action_src, action_dst);
1876 : : if (ret)
1877 : 0 : return rte_flow_error_set
1878 : : (error, ENOMEM,
1879 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1880 : : "not enough memory to store"
1881 : : " vport action");
1882 : : }
1883 : : return 0;
1884 : : }
1885 : :
1886 : : static __rte_always_inline int
1887 : : flow_hw_cnt_compile(struct rte_eth_dev *dev, uint32_t start_pos,
1888 : : struct mlx5_hw_actions *acts, bool is_root)
1889 : : {
1890 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
1891 : : uint32_t pos = start_pos;
1892 : : cnt_id_t cnt_id;
1893 : : int ret;
1894 : :
1895 : 0 : ret = mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0);
1896 : : if (ret != 0)
1897 : : return ret;
1898 : 0 : ret = mlx5_hws_cnt_pool_get_action_offset
1899 : : (priv->hws_cpool,
1900 : : cnt_id,
1901 : : &acts->rule_acts[pos].action,
1902 : : &acts->rule_acts[pos].counter.offset,
1903 : : is_root);
1904 : : if (ret != 0)
1905 : : return ret;
1906 : 0 : acts->cnt_id = cnt_id;
1907 : : return 0;
1908 : : }
1909 : :
1910 : : static __rte_always_inline bool
1911 : : is_of_vlan_pcp_present(const struct rte_flow_action *actions)
1912 : : {
1913 : : /*
1914 : : * Order of RTE VLAN push actions is
1915 : : * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
1916 : : */
1917 : 0 : return actions[MLX5_HW_VLAN_PUSH_PCP_IDX].type ==
1918 : : RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP;
1919 : : }
1920 : :
1921 : : static __rte_always_inline bool
1922 : : is_template_masked_push_vlan(const struct rte_flow_action_of_push_vlan *mask)
1923 : : {
1924 : : /*
1925 : : * In masked push VLAN template all RTE push actions are masked.
1926 : : */
1927 [ # # ]: 0 : return mask && mask->ethertype != 0;
1928 : : }
1929 : :
/*
 * Pack the TPID + TCI of the RTE VLAN push action triplet into one 32-bit
 * big-endian word. NOTE(review): operands appear to arrive in network byte
 * order already (hence the byte shuffling below) -- confirm against the
 * rte_flow_action_of_* definitions.
 */
static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
{
	/*
	 * OpenFlow Switch Specification defines 801.1q VID as 12+1 bits.
	 */
	rte_be32_t type, vid, pcp;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	rte_be32_t vid_lo, vid_hi;
#endif

	type = ((const struct rte_flow_action_of_push_vlan *)
		actions[MLX5_HW_VLAN_PUSH_TYPE_IDX].conf)->ethertype;
	vid = ((const struct rte_flow_action_of_set_vlan_vid *)
		actions[MLX5_HW_VLAN_PUSH_VID_IDX].conf)->vlan_vid;
	/* OF_SET_VLAN_PCP is optional; default the priority bits to 0. */
	pcp = is_of_vlan_pcp_present(actions) ?
	      ((const struct rte_flow_action_of_set_vlan_pcp *)
	       actions[MLX5_HW_VLAN_PUSH_PCP_IDX].conf)->vlan_pcp : 0;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	/*
	 * On little-endian hosts the big-endian VID bytes are swapped in
	 * memory: split them, insert the 3-bit PCP above the VID high bits,
	 * and recombine so the result is laid out in network order.
	 */
	vid_hi = vid & 0xff;
	vid_lo = vid >> 8;
	return (((vid_lo << 8) | (pcp << 5) | vid_hi) << 16) | type;
#else
	return (type << 16) | (pcp << 13) | vid;
#endif
}
1955 : :
/**
 * Allocate and initialize an ASO meter object for a METER_MARK action.
 *
 * Allocates a meter entry from the HWS meter ipool, fills its parameters
 * from the action configuration and enqueues an ASO WQE to program it.
 * On the control queue (MLX5_HW_INV_QUEUE) the call also waits for the
 * ASO object completion.
 *
 * @param dev
 *   Ethernet device.
 * @param queue
 *   HW queue to post the ASO update on; MLX5_HW_INV_QUEUE means the
 *   synchronous control path.
 * @param action
 *   METER_MARK flow action providing profile/state/color mode.
 * @param job
 *   Queue job; its action handle is set to the encoded meter indirect id.
 * @param push
 *   Whether to push the WQE immediately.
 * @param[out] aso_mtr
 *   Receives the allocated meter object.
 * @param[out] error
 *   Flow error structure.
 *
 * @return
 *   0 on success, negative errno (set in @p error, or bare -EIO on wait
 *   failure) otherwise.
 */
static __rte_always_inline int
flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue,
			 const struct rte_flow_action *action,
			 struct mlx5_hw_q_job *job, bool push,
			 struct mlx5_aso_mtr **aso_mtr,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
	const struct rte_flow_action_meter_mark *meter_mark = action->conf;
	struct mlx5_flow_meter_info *fm;
	uint32_t mtr_id = 0;
	/* Pre-compute the indirect-action type bits of the returned handle. */
	uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK <<
			   MLX5_INDIRECT_ACTION_TYPE_OFFSET;

	if (priv->shared_host)
		return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "Meter mark actions can only be created on the host port");
	MLX5_ASSERT(aso_mtr);
	if (meter_mark->profile == NULL)
		return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "No Meter mark profile");
	*aso_mtr = mlx5_ipool_malloc(pool->idx_pool, &mtr_id);
	if (!*aso_mtr) {
		/* The ipool may still have reserved an index; release it. */
		if (mtr_id)
			mlx5_ipool_free(pool->idx_pool, mtr_id);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to allocate aso meter entry");
	}
	/* Fill the flow meter parameters. */
	(*aso_mtr)->type = ASO_METER_INDIRECT;
	fm = &(*aso_mtr)->fm;
	fm->meter_id = mtr_id;
	fm->profile = (struct mlx5_flow_meter_profile *)(meter_mark->profile);
	fm->is_enable = meter_mark->state;
	fm->color_aware = meter_mark->color_mode;
	(*aso_mtr)->pool = pool;
	/* Control queue updates are waited on synchronously below. */
	(*aso_mtr)->state = (queue == MLX5_HW_INV_QUEUE) ?
			    ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
	/* ipool ids are 1-based; HW offsets are 0-based. */
	(*aso_mtr)->offset = mtr_id - 1;
	(*aso_mtr)->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN;
	job->action = (void *)(handle | mtr_id);
	/* Update ASO flow meter by wqe. */
	if (mlx5_aso_meter_update_by_wqe(priv, queue, *aso_mtr,
					 &priv->mtr_bulk, job, push)) {
		mlx5_ipool_free(pool->idx_pool, mtr_id);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "Failed to enqueue ASO meter update");
	}
	/* Wait for ASO object completion. */
	if (queue == MLX5_HW_INV_QUEUE &&
	    mlx5_aso_mtr_wait(priv, *aso_mtr, true)) {
		mlx5_ipool_free(pool->idx_pool, mtr_id);
		return -EIO;
	}
	return 0;
}
2018 : :
/**
 * Compile a METER_MARK action into DR rule actions.
 *
 * Allocates a job and an ASO meter, then fills the DR meter action and
 * offset at @p aso_mtr_pos. The allocated meter id is returned through
 * @p index so the caller can release it later.
 *
 * @return
 *   0 on success, -1 on job/meter allocation failure (error set by callee).
 */
static __rte_always_inline int
flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
			   uint16_t aso_mtr_pos,
			   const struct rte_flow_action *action,
			   struct mlx5dr_rule_action *acts,
			   uint32_t *index,
			   uint32_t queue,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
	struct mlx5_aso_mtr *aso_mtr = NULL;
	struct mlx5_hw_q_job *job =
		flow_hw_action_job_init(priv, queue, NULL, NULL, NULL,
					MLX5_HW_Q_JOB_TYPE_CREATE,
					MLX5_HW_INDIRECT_TYPE_LEGACY, NULL);
	int ret;

	if (!job)
		return -1;
	ret = flow_hw_meter_mark_alloc(dev, queue, action, job, true, &aso_mtr, error);
	if (ret) {
		/*
		 * On -EIO the job was consumed by the enqueued WQE and must
		 * not be returned; otherwise release it on the (possibly
		 * remapped control) queue.
		 */
		if (ret != -EIO) {
			if (queue == MLX5_HW_INV_QUEUE)
				queue = CTRL_QUEUE_ID(priv);
			flow_hw_job_put(priv, job, queue);
		}
		return -1;
	}

	/* Compile METER_MARK action */
	acts[aso_mtr_pos].action = pool->action;
	acts[aso_mtr_pos].aso_meter.offset = aso_mtr->offset;
	*index = aso_mtr->fm.meter_id;
	return 0;
}
2055 : :
2056 : : static int
2057 : 0 : flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
2058 : : __rte_unused const struct mlx5_action_construct_data *act_data,
2059 : : const struct rte_flow_action *action,
2060 : : struct mlx5dr_rule_action *dr_rule)
2061 : : {
2062 : 0 : const struct rte_flow_action_indirect_list *list_conf = action->conf;
2063 : 0 : const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
2064 : :
2065 : 0 : dr_rule->action = mirror->mirror_action;
2066 : 0 : return 0;
2067 : : }
2068 : :
2069 : : /**
2070 : : * HWS mirror implemented as FW island.
2071 : : * The action does not support indirect list flow configuration.
2072 : : * If template handle was masked, use handle mirror action in flow rules.
2073 : : * Otherwise let flow rule specify mirror handle.
2074 : : */
2075 : : static int
2076 : 0 : hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
2077 : : const struct rte_flow_action *action,
2078 : : const struct rte_flow_action *mask,
2079 : : struct mlx5_hw_actions *acts,
2080 : : uint16_t action_src, uint16_t action_dst)
2081 : : {
2082 : : int ret = 0;
2083 : 0 : const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2084 : :
2085 [ # # # # ]: 0 : if (mask_conf && mask_conf->handle) {
2086 : : /**
2087 : : * If mirror handle was masked, assign fixed DR5 mirror action.
2088 : : */
2089 : : flow_hw_translate_indirect_mirror(dev, NULL, action,
2090 : 0 : &acts->rule_acts[action_dst]);
2091 : : } else {
2092 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2093 : : ret = flow_hw_act_data_indirect_list_append
2094 : : (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2095 : : action_src, action_dst,
2096 : : flow_hw_translate_indirect_mirror);
2097 : : }
2098 : :
2099 : 0 : return ret;
2100 : : }
2101 : :
2102 : : static int
2103 : 0 : flow_hw_reformat_action(__rte_unused struct rte_eth_dev *dev,
2104 : : __rte_unused const struct mlx5_action_construct_data *data,
2105 : : const struct rte_flow_action *action,
2106 : : struct mlx5dr_rule_action *dr_rule)
2107 : : {
2108 : 0 : const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
2109 : :
2110 : 0 : dr_rule->action = ((struct mlx5_hw_encap_decap_action *)
2111 : 0 : (indlst_conf->handle))->action;
2112 [ # # ]: 0 : if (!dr_rule->action)
2113 : 0 : return -EINVAL;
2114 : : return 0;
2115 : : }
2116 : :
2117 : : /**
2118 : : * Template conf must not be masked. If handle is masked, use the one in template,
2119 : : * otherwise update per flow rule.
2120 : : */
2121 : : static int
2122 : 0 : hws_table_tmpl_translate_indirect_reformat(struct rte_eth_dev *dev,
2123 : : const struct rte_flow_action *action,
2124 : : const struct rte_flow_action *mask,
2125 : : struct mlx5_hw_actions *acts,
2126 : : uint16_t action_src, uint16_t action_dst)
2127 : : {
2128 : : int ret = -1;
2129 : 0 : const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
2130 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2131 : :
2132 [ # # # # : 0 : if (mask_conf && mask_conf->handle && !mask_conf->conf)
# # ]
2133 : : /**
2134 : : * If handle was masked, assign fixed DR action.
2135 : : */
2136 : : ret = flow_hw_reformat_action(dev, NULL, action,
2137 [ # # ]: 0 : &acts->rule_acts[action_dst]);
2138 [ # # # # : 0 : else if (mask_conf && !mask_conf->handle && !mask_conf->conf)
# # ]
2139 : : ret = flow_hw_act_data_indirect_list_append
2140 : : (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
2141 : : action_src, action_dst, flow_hw_reformat_action);
2142 : 0 : return ret;
2143 : : }
2144 : :
2145 : : static int
2146 : 0 : flow_dr_set_meter(struct mlx5_priv *priv,
2147 : : struct mlx5dr_rule_action *dr_rule,
2148 : : const struct rte_flow_action_indirect_list *action_conf)
2149 : : {
2150 : 0 : const struct mlx5_indlst_legacy *legacy_obj =
2151 : : (typeof(legacy_obj))action_conf->handle;
2152 : 0 : struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
2153 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
2154 : 0 : uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
2155 : 0 : struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
2156 : :
2157 [ # # ]: 0 : if (!aso_mtr)
2158 : : return -EINVAL;
2159 : 0 : dr_rule->action = mtr_pool->action;
2160 : 0 : dr_rule->aso_meter.offset = aso_mtr->offset;
2161 : 0 : return 0;
2162 : : }
2163 : :
2164 : : __rte_always_inline static void
2165 : : flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
2166 : : {
2167 : 0 : dr_rule->aso_meter.init_color =
2168 : 0 : (enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
2169 : 0 : }
2170 : :
2171 : : static int
2172 : 0 : flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
2173 : : const struct mlx5_action_construct_data *act_data,
2174 : : const struct rte_flow_action *action,
2175 : : struct mlx5dr_rule_action *dr_rule)
2176 : : {
2177 : : int ret;
2178 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2179 : 0 : const struct rte_flow_action_indirect_list *action_conf = action->conf;
2180 : 0 : const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
2181 : : (typeof(flow_conf))action_conf->conf;
2182 : :
2183 : 0 : ret = flow_dr_set_meter(priv, dr_rule, action_conf);
2184 [ # # ]: 0 : if (ret)
2185 : : return ret;
2186 [ # # ]: 0 : if (!act_data->shared_meter.conf_masked) {
2187 [ # # # # : 0 : if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
# # ]
2188 : : flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
2189 : : }
2190 : : return 0;
2191 : : }
2192 : :
/**
 * Table-template translation of a legacy indirect-list meter action.
 *
 * A masked handle binds the DR meter action at template time; a masked
 * conf fixes the initial color. If either part stays unmasked, per-rule
 * translation is registered via the action-construct data list.
 *
 * @return
 *   0 on success, negative value on resolution/append failure.
 */
static int
hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
					const struct rte_flow_action *action,
					const struct rte_flow_action *mask,
					struct mlx5_hw_actions *acts,
					uint16_t action_src, uint16_t action_dst)
{
	int ret;
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_indirect_list *action_conf = action->conf;
	const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
	bool is_handle_masked = mask_conf && mask_conf->handle;
	bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
	struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];

	if (is_handle_masked) {
		ret = flow_dr_set_meter(priv, dr_rule, action->conf);
		if (ret)
			return ret;
	}
	if (is_conf_masked) {
		const struct
			rte_flow_indirect_update_flow_meter_mark **flow_conf =
			(typeof(flow_conf))action_conf->conf;
		flow_dr_mtr_flow_color(dr_rule,
				       flow_conf[0]->init_color);
	}
	if (!is_handle_masked || !is_conf_masked) {
		struct mlx5_action_construct_data *act_data;

		ret = flow_hw_act_data_indirect_list_append
			(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
			 action_src, action_dst, flow_hw_translate_indirect_meter);
		if (ret)
			return ret;
		/*
		 * NOTE(review): relies on the append inserting at the list
		 * head so LIST_FIRST() is the entry just added -- confirm
		 * against flow_hw_act_data_indirect_list_append().
		 */
		act_data = LIST_FIRST(&acts->act_list);
		act_data->shared_meter.conf_masked = is_conf_masked;
	}
	return 0;
}
2233 : :
2234 : : static int
2235 : : hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
2236 : : const struct rte_flow_action *action,
2237 : : const struct rte_flow_action *mask,
2238 : : struct mlx5_hw_actions *acts,
2239 : : uint16_t action_src, uint16_t action_dst)
2240 : : {
2241 : : int ret;
2242 : : const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
2243 : : struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
2244 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
2245 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
2246 : :
2247 : 0 : switch (type) {
2248 : 0 : case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
2249 : 0 : ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
2250 : : acts, action_src,
2251 : : action_dst);
2252 : 0 : break;
2253 : : default:
2254 : : ret = -EINVAL;
2255 : : break;
2256 : : }
2257 : : return ret;
2258 : : }
2259 : :
2260 : : /*
2261 : : * template .. indirect_list handle Ht conf Ct ..
2262 : : * mask .. indirect_list handle Hm conf Cm ..
2263 : : *
2264 : : * PMD requires Ht != 0 to resolve handle type.
2265 : : * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
2266 : : * not change. Otherwise, DR5 action will be resolved during flow rule build.
2267 : : * If Ct was masked (Cm != 0), table template processing updates base
2268 : : * indirect action configuration with Ct parameters.
2269 : : */
2270 : : static int
2271 : 0 : table_template_translate_indirect_list(struct rte_eth_dev *dev,
2272 : : const struct rte_flow_action *action,
2273 : : const struct rte_flow_action *mask,
2274 : : struct mlx5_hw_actions *acts,
2275 : : uint16_t action_src, uint16_t action_dst)
2276 : : {
2277 : : int ret = 0;
2278 : : enum mlx5_indirect_list_type type;
2279 : 0 : const struct rte_flow_action_indirect_list *list_conf = action->conf;
2280 : :
2281 [ # # # # ]: 0 : if (!list_conf || !list_conf->handle)
2282 : : return -EINVAL;
2283 : : type = mlx5_get_indirect_list_type(list_conf->handle);
2284 [ # # # # ]: 0 : switch (type) {
2285 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
2286 [ # # ]: 0 : ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
2287 : : acts, action_src,
2288 : : action_dst);
2289 : : break;
2290 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
2291 : 0 : ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
2292 : : acts, action_src,
2293 : : action_dst);
2294 : 0 : break;
2295 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
2296 [ # # ]: 0 : if (list_conf->conf)
2297 : : return -EINVAL;
2298 : 0 : ret = hws_table_tmpl_translate_indirect_reformat(dev, action, mask,
2299 : : acts, action_src,
2300 : : action_dst);
2301 : 0 : break;
2302 : : default:
2303 : : return -EINVAL;
2304 : : }
2305 : : return ret;
2306 : : }
2307 : :
2308 : : /**
2309 : : * Translate given encap action and mask to raw tunnel header buffer.
2310 : : *
2311 : : * @param[in] action
2312 : : * Pointer to encap action.
2313 : : * @param[in] mask
2314 : : * Pointer to encap action's mask.
2315 : : * @param[out] conf_encap_data
2316 : : * Buffer where tunnel header will be written.
2317 : : * @param[out] data_size
2318 : : * Pointer to tunnel header size.
2319 : : * @param[out] error
2320 : : * Pointer to error structure.
2321 : : *
2322 : : * @return
2323 : : * 0 or greater if translation was successful.
2324 : : * Negative errno value otherwise.
2325 : : *
2326 : : * If returned value is 0, then action is not shared in the actions template
2327 : : * If bigger than 0, then it is shared.
2328 : : */
2329 : : static int
2330 : 0 : translate_l2_encap_action(const struct rte_flow_action *action,
2331 : : const struct rte_flow_action *mask,
2332 : : uint8_t conf_encap_data[MLX5_ENCAP_MAX_LEN],
2333 : : size_t *data_size,
2334 : : struct rte_flow_error *error)
2335 : : {
2336 : : struct rte_flow_item *conf_item;
2337 : : int ret;
2338 : :
2339 [ # # ]: 0 : if (action->conf == NULL)
2340 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2341 : : "Missing VXLAN/NVGRE encap action configuration");
2342 : :
2343 : : /* Only these 2 action types receive encap data as flow item pattern. */
2344 : : MLX5_ASSERT(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ||
2345 : : action->type == RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP);
2346 [ # # ]: 0 : if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2347 : 0 : conf_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap, action->conf);
2348 : : else
2349 : 0 : conf_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap, action->conf);
2350 [ # # ]: 0 : if (conf_item == NULL)
2351 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
2352 : : "Missing VXLAN/NVGRE tunnel definition in action config");
2353 : :
2354 : 0 : ret = mlx5_flow_dv_convert_encap_data(conf_item, conf_encap_data, data_size, error);
2355 [ # # ]: 0 : if (ret < 0)
2356 : : return ret;
2357 : :
2358 : : /* If mask is provided, then action is shared */
2359 : 0 : return mask->conf != NULL;
2360 : : }
2361 : :
2362 : : static void
2363 : : mlx5_set_reformat_header(struct mlx5dr_action_reformat_header *hdr,
2364 : : uint8_t *encap_data,
2365 : : size_t data_size)
2366 : : {
2367 : 0 : hdr->sz = data_size;
2368 : 0 : hdr->data = encap_data;
2369 : : }
2370 : :
/**
 * Translate a reformat (encap/decap) action for a template table.
 *
 * Allocates the encap/decap context. A fully masked template (both data
 * and data mask present) or a reformat type without a multi-pattern slot
 * is compiled as a shared action with the data copied locally; otherwise
 * the header is registered in the table's multi-pattern context and a
 * per-rule construct entry is appended.
 *
 * @return
 *   0 on success, negative errno (set in @p error / rte_errno) otherwise.
 */
static int
mlx5_tbl_translate_reformat(struct mlx5_priv *priv,
			    struct mlx5_hw_actions *acts,
			    struct rte_flow_actions_template *at,
			    uint8_t *encap_data, uint8_t *encap_data_m,
			    struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
			    size_t data_size, uint16_t reformat_src,
			    enum mlx5dr_action_type refmt_type,
			    struct rte_flow_error *error)
{
	/* Negative index means this reformat type has no multi-pattern slot. */
	int mp_reformat_ix = mlx5_multi_pattern_reformat_to_index(refmt_type);
	struct mlx5dr_action_reformat_header hdr;
	bool shared_rfmt = false;
	int ret;

	MLX5_ASSERT(at->reformat_off != UINT16_MAX);
	if (encap_data && encap_data_m)
		shared_rfmt = true;
	acts->encap_decap = mlx5_malloc(MLX5_MEM_ZERO,
					sizeof(*acts->encap_decap) + data_size,
					0, SOCKET_ID_ANY);
	if (!acts->encap_decap)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "no memory for reformat context");
	acts->encap_decap_pos = at->reformat_off;
	acts->encap_decap->data_size = data_size;
	acts->encap_decap->action_type = refmt_type;
	if (shared_rfmt || mp_reformat_ix < 0) {
		uint16_t reformat_ix = at->reformat_off;
		/*
		 * This copy is only needed in non template mode.
		 * In order to create the action later.
		 */
		memcpy(acts->encap_decap->data, encap_data, data_size);
		acts->rule_acts[reformat_ix].reformat.data = acts->encap_decap->data;
		acts->rule_acts[reformat_ix].reformat.offset = 0;
		acts->encap_decap->shared = true;
	} else {
		uint32_t ix;
		typeof(mp_ctx->reformat[0]) *reformat = mp_ctx->reformat +
							mp_reformat_ix;
		/* Register the header in the table multi-pattern context. */
		mlx5_set_reformat_header(&hdr, encap_data, data_size);
		ix = reformat->elements_num++;
		reformat->reformat_hdr[ix] = hdr;
		acts->rule_acts[at->reformat_off].reformat.hdr_idx = ix;
		acts->encap_decap->multi_pattern = 1;
		/* Defer per-rule encap data resolution to rule construction. */
		ret = __flow_hw_act_data_encap_append
			(priv, acts, (at->actions + reformat_src)->type,
			 reformat_src, at->reformat_off, data_size);
		if (ret)
			return -rte_errno;
		mlx5_multi_pattern_activate(mp_ctx);
	}
	return 0;
}
2427 : :
2428 : : static int
2429 : 0 : mlx5_tbl_create_reformat_action(struct mlx5_priv *priv,
2430 : : const struct rte_flow_template_table_attr *table_attr,
2431 : : struct mlx5_hw_actions *acts,
2432 : : struct rte_flow_actions_template *at,
2433 : : uint8_t *encap_data,
2434 : : size_t data_size,
2435 : : enum mlx5dr_action_type refmt_type)
2436 : : {
2437 : 0 : const struct rte_flow_attr *attr = &table_attr->flow_attr;
2438 : : bool unified_fdb = is_unified_fdb(priv);
2439 : 0 : enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr, table_attr->specialize,
2440 : : unified_fdb);
2441 : : struct mlx5dr_action_reformat_header hdr;
2442 : :
2443 : : mlx5_set_reformat_header(&hdr, encap_data, data_size);
2444 : 0 : uint16_t reformat_ix = at->reformat_off;
2445 : 0 : uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] |
2446 : : MLX5DR_ACTION_FLAG_SHARED;
2447 : :
2448 : 0 : acts->encap_decap->action = mlx5dr_action_create_reformat(priv->dr_ctx, refmt_type,
2449 : : 1, &hdr, 0, flags);
2450 [ # # ]: 0 : if (!acts->encap_decap->action)
2451 : 0 : return -rte_errno;
2452 : 0 : acts->rule_acts[reformat_ix].action = acts->encap_decap->action;
2453 : 0 : return 0;
2454 : : }
2455 : :
/**
 * Translate compiled modify-header commands for a template table.
 *
 * Validates the command count against HW limits, clones the compiled
 * @p mhdr into the actions context and, for non-shared headers, registers
 * the command pattern in the table's multi-pattern context.
 *
 * @return
 *   0 on success, negative errno (set in @p error) otherwise.
 */
static int
mlx5_tbl_translate_modify_header(struct rte_eth_dev *dev,
				 const struct mlx5_flow_template_table_cfg *cfg,
				 struct mlx5_hw_actions *acts,
				 struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
				 struct mlx5_hw_modify_header_action *mhdr,
				 struct rte_flow_error *error)
{
	uint16_t mhdr_ix = mhdr->pos;
	struct mlx5dr_action_mh_pattern pattern = {
		.sz = sizeof(struct mlx5_modification_cmd) * mhdr->mhdr_cmds_num
	};

	int ret = flow_hw_validate_compiled_modify_field(dev, cfg, mhdr, error);
	if (ret) {
		/*
		 * NOTE(review): only this error path tears down the action
		 * template; the malloc failure below leaves cleanup to the
		 * caller -- confirm this asymmetry is intended.
		 */
		__flow_hw_action_template_destroy(dev, acts);
		return ret;
	}
	acts->mhdr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*acts->mhdr),
				 0, SOCKET_ID_ANY);
	if (!acts->mhdr)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "translate modify_header: no memory for modify header context");
	rte_memcpy(acts->mhdr, mhdr, sizeof(*mhdr));
	if (!mhdr->shared) {
		/* Point the pattern at the cloned command array. */
		pattern.data = (rte_be64_t *)acts->mhdr->mhdr_cmds;
		typeof(mp_ctx->mh) *mh = &mp_ctx->mh;
		uint32_t idx = mh->elements_num;
		mh->pattern[mh->elements_num++] = pattern;
		acts->mhdr->multi_pattern = 1;
		acts->rule_acts[mhdr_ix].modify_header.pattern_idx = idx;
		mlx5_multi_pattern_activate(mp_ctx);
	}
	return 0;
}
2492 : :
2493 : : static int
2494 : 0 : mlx5_tbl_ensure_shared_modify_header(struct rte_eth_dev *dev,
2495 : : const struct mlx5_flow_template_table_cfg *cfg,
2496 : : struct mlx5_hw_actions *acts,
2497 : : struct rte_flow_error *error)
2498 : : {
2499 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
2500 : : bool unified_fdb = is_unified_fdb(priv);
2501 : : const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2502 : 0 : const struct rte_flow_attr *attr = &table_attr->flow_attr;
2503 : 0 : enum mlx5dr_table_type tbl_type = get_mlx5dr_table_type(attr, table_attr->specialize,
2504 : : unified_fdb);
2505 : : struct mlx5dr_action_mh_pattern pattern;
2506 : :
2507 [ # # ]: 0 : if (!acts->mhdr)
2508 : 0 : return rte_flow_error_set(error, EINVAL,
2509 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2510 : : "translate modify_header: mhdr is NULL");
2511 : :
2512 : 0 : pattern.sz = sizeof(struct mlx5_modification_cmd) * acts->mhdr->mhdr_cmds_num;
2513 : 0 : uint16_t mhdr_ix = acts->mhdr->pos;
2514 : 0 : uint32_t flags = mlx5_hw_act_flag[!!attr->group][tbl_type] | MLX5DR_ACTION_FLAG_SHARED;
2515 : :
2516 : 0 : pattern.data = (rte_be64_t *)acts->mhdr->mhdr_cmds;
2517 : 0 : acts->mhdr->action = mlx5dr_action_create_modify_header(priv->dr_ctx, 1,
2518 : : &pattern, 0, flags);
2519 [ # # ]: 0 : if (!acts->mhdr->action)
2520 : 0 : return rte_flow_error_set(error, rte_errno,
2521 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2522 : : "translate modify_header: failed to create DR action");
2523 : 0 : acts->rule_acts[mhdr_ix].action = acts->mhdr->action;
2524 : 0 : return 0;
2525 : : }
2526 : :
2527 : : static int
2528 : 0 : mlx5_create_ipv6_ext_reformat(struct rte_eth_dev *dev,
2529 : : const struct mlx5_flow_template_table_cfg *cfg,
2530 : : struct mlx5_hw_actions *acts,
2531 : : struct rte_flow_actions_template *at,
2532 : : uint8_t *push_data, uint8_t *push_data_m,
2533 : : size_t push_size, uint16_t recom_src,
2534 : : enum mlx5dr_action_type recom_type)
2535 : : {
2536 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2537 : : bool unified_fdb = is_unified_fdb(priv);
2538 : : const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2539 : 0 : const struct rte_flow_attr *attr = &table_attr->flow_attr;
2540 : : enum mlx5dr_table_type type =
2541 : 0 : get_mlx5dr_table_type(attr, table_attr->specialize, unified_fdb);
2542 : : struct mlx5_action_construct_data *act_data;
2543 : 0 : struct mlx5dr_action_reformat_header hdr = {0};
2544 : : uint32_t flag, bulk = 0;
2545 : :
2546 : 0 : flag = mlx5_hw_act_flag[!!attr->group][type];
2547 : 0 : acts->push_remove = mlx5_malloc(MLX5_MEM_ZERO,
2548 : : sizeof(*acts->push_remove) + push_size,
2549 : : 0, SOCKET_ID_ANY);
2550 [ # # ]: 0 : if (!acts->push_remove)
2551 : : return -ENOMEM;
2552 : :
2553 [ # # # ]: 0 : switch (recom_type) {
2554 : 0 : case MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT:
2555 [ # # ]: 0 : if (!push_data || !push_size)
2556 : 0 : goto err1;
2557 [ # # ]: 0 : if (!push_data_m) {
2558 [ # # ]: 0 : bulk = rte_log2_u32(table_attr->nb_flows);
2559 : : } else {
2560 : 0 : flag |= MLX5DR_ACTION_FLAG_SHARED;
2561 : 0 : acts->push_remove->shared = 1;
2562 : : }
2563 : 0 : acts->push_remove->data_size = push_size;
2564 : 0 : memcpy(acts->push_remove->data, push_data, push_size);
2565 : 0 : hdr.data = push_data;
2566 : 0 : hdr.sz = push_size;
2567 : 0 : break;
2568 : 0 : case MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT:
2569 : 0 : flag |= MLX5DR_ACTION_FLAG_SHARED;
2570 : 0 : acts->push_remove->shared = 1;
2571 : 0 : break;
2572 : : default:
2573 : : break;
2574 : : }
2575 : :
2576 : 0 : acts->push_remove->action =
2577 : 0 : mlx5dr_action_create_reformat_ipv6_ext(priv->dr_ctx,
2578 : : recom_type, &hdr, bulk, flag);
2579 [ # # ]: 0 : if (!acts->push_remove->action)
2580 : 0 : goto err1;
2581 : 0 : acts->rule_acts[at->recom_off].action = acts->push_remove->action;
2582 : 0 : acts->rule_acts[at->recom_off].ipv6_ext.header = acts->push_remove->data;
2583 : 0 : acts->rule_acts[at->recom_off].ipv6_ext.offset = 0;
2584 : 0 : acts->push_remove_pos = at->recom_off;
2585 [ # # ]: 0 : if (!acts->push_remove->shared) {
2586 : 0 : act_data = __flow_hw_act_data_push_append(dev, acts,
2587 : : RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH,
2588 : : recom_src, at->recom_off, push_size);
2589 : : if (!act_data)
2590 : 0 : goto err;
2591 : : }
2592 : : return 0;
2593 : : err:
2594 [ # # ]: 0 : if (acts->push_remove->action)
2595 : 0 : mlx5dr_action_destroy(acts->push_remove->action);
2596 : 0 : err1:
2597 [ # # ]: 0 : if (acts->push_remove) {
2598 : 0 : mlx5_free(acts->push_remove);
2599 : 0 : acts->push_remove = NULL;
2600 : : }
2601 : : return -EINVAL;
2602 : : }
2603 : :
2604 : : static bool
2605 : : is_indirect_action_type_supported_root(const enum rte_flow_action_type type)
2606 : : {
2607 : : switch (type) {
2608 : : case RTE_FLOW_ACTION_TYPE_COUNT:
2609 : : case RTE_FLOW_ACTION_TYPE_AGE:
2610 : : return mlx5dr_action_counter_root_is_supported();
2611 : : default:
2612 : : return false;
2613 : : }
2614 : : }
2615 : :
2616 : : /**
2617 : : * Translate rte_flow actions to DR action.
2618 : : *
2619 : : * As the action template has already indicated the actions. Translate
2620 : : * the rte_flow actions to DR action if possbile. So in flow create
2621 : : * stage we will save cycles from handing the actions' organizing.
2622 : : * For the actions with limited information, need to add these to a
2623 : : * list.
2624 : : *
2625 : : * @param[in] dev
2626 : : * Pointer to the rte_eth_dev structure.
2627 : : * @param[in] cfg
2628 : : * Pointer to the table configuration.
2629 : : * @param[in/out] acts
2630 : : * Pointer to the template HW steering DR actions.
2631 : : * @param[in] at
2632 : : * Action template.
2633 : : * @param[in] nt_mode
2634 : : * Non template rule translate.
2635 : : * @param[out] error
2636 : : * Pointer to error structure.
2637 : : *
2638 : : * @return
2639 : : * 0 on success, a negative errno otherwise and rte_errno is set.
2640 : : */
2641 : : static int
2642 : 0 : __flow_hw_translate_actions_template(struct rte_eth_dev *dev,
2643 : : const struct mlx5_flow_template_table_cfg *cfg,
2644 : : struct mlx5_hw_actions *acts,
2645 : : struct rte_flow_actions_template *at,
2646 : : struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
2647 : : bool nt_mode,
2648 : : struct rte_flow_error *error)
2649 : : {
2650 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
2651 : 0 : struct mlx5_hca_attr *hca_attr = &priv->sh->cdev->config.hca_attr;
2652 : 0 : const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
2653 : 0 : const struct rte_flow_attr *attr = &table_attr->flow_attr;
2654 : 0 : struct rte_flow_action *actions = at->actions;
2655 : 0 : struct rte_flow_action *masks = at->masks;
2656 : : enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
2657 : : enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
2658 : : const struct rte_flow_action_raw_encap *raw_encap_data;
2659 : : const struct rte_flow_action_ipv6_ext_push *ipv6_ext_data;
2660 : : uint16_t reformat_src = 0, recom_src = 0;
2661 : 0 : uint8_t converted_encap_data[MLX5_ENCAP_MAX_LEN] = { 0 };
2662 : : uint8_t *encap_data = NULL, *encap_data_m = NULL;
2663 : : uint8_t *push_data = NULL, *push_data_m = NULL;
2664 : 0 : size_t data_size = 0, push_size = 0;
2665 : : struct mlx5_hw_modify_header_action mhdr = { 0 };
2666 : 0 : struct rte_flow_error sub_error = {
2667 : : .type = RTE_FLOW_ERROR_TYPE_NONE,
2668 : : .cause = NULL,
2669 : : .message = NULL,
2670 : : };
2671 : : bool actions_end = false;
2672 : : uint32_t type;
2673 : : bool reformat_used = false;
2674 : : bool recom_used = false;
2675 : : unsigned int of_vlan_offset;
2676 : : uint32_t ct_idx;
2677 : : int ret, err;
2678 : 0 : bool is_root = mlx5_group_id_is_root(cfg->attr.flow_attr.group);
2679 : : bool unified_fdb = is_unified_fdb(priv);
2680 : : struct mlx5dr_action *dr_action = NULL;
2681 : :
2682 : : flow_hw_modify_field_init(&mhdr, at);
2683 : 0 : type = get_mlx5dr_table_type(attr, cfg->attr.specialize, unified_fdb);
2684 [ # # ]: 0 : for (; !actions_end; actions++, masks++) {
2685 : 0 : uint64_t pos = actions - at->actions;
2686 : 0 : uint16_t src_pos = pos - at->src_off[pos];
2687 : 0 : uint16_t dr_pos = at->dr_off[pos];
2688 : :
2689 [ # # # # : 0 : switch ((int)actions->type) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # ]
2690 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
2691 [ # # ]: 0 : if (is_root) {
2692 : 0 : DRV_LOG(ERR, "Indirect action is not supported in root table.");
2693 : 0 : goto err;
2694 : : }
2695 : 0 : ret = table_template_translate_indirect_list
2696 : : (dev, actions, masks, acts, src_pos, dr_pos);
2697 [ # # ]: 0 : if (ret)
2698 : 0 : goto err;
2699 : : break;
2700 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
2701 [ # # ]: 0 : if (is_root && !is_indirect_action_type_supported_root(masks->type)) {
2702 : 0 : DRV_LOG(ERR, "Indirect action type (%d) is not supported on root.",
2703 : : masks->type);
2704 : 0 : goto err;
2705 : : }
2706 [ # # # # ]: 0 : if (actions->conf && masks->conf) {
2707 [ # # # # : 0 : if (flow_hw_shared_action_translate(dev, actions, acts,
# # # # ]
2708 : : src_pos, dr_pos, &sub_error))
2709 : 0 : goto err;
2710 : 0 : } else if (__flow_hw_act_data_indirect_append
2711 : : (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
2712 : : masks->type, src_pos, dr_pos)){
2713 : 0 : goto err;
2714 : : }
2715 : : break;
2716 : : case RTE_FLOW_ACTION_TYPE_VOID:
2717 : : break;
2718 : 0 : case RTE_FLOW_ACTION_TYPE_DROP:
2719 : 0 : dr_action = mlx5_hws_global_action_drop_get(priv, type, is_root);
2720 [ # # ]: 0 : if (dr_action == NULL) {
2721 : 0 : DRV_LOG(ERR, "port %u failed to allocate drop action",
2722 : : priv->dev_data->port_id);
2723 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
2724 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
2725 : : "failed to allocate drop action");
2726 : 0 : goto err;
2727 : : }
2728 : 0 : acts->rule_acts[dr_pos].action = dr_action;
2729 : 0 : break;
2730 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
2731 [ # # ]: 0 : if (is_root) {
2732 : 0 : DRV_LOG(ERR, "Port representor is not supported in root table.");
2733 : 0 : goto err;
2734 : : }
2735 : 0 : dr_action = mlx5_hws_global_action_def_miss_get(priv, type, is_root);
2736 [ # # ]: 0 : if (dr_action == NULL) {
2737 : 0 : DRV_LOG(ERR, "port %u failed to allocate port representor action",
2738 : : priv->dev_data->port_id);
2739 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
2740 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
2741 : : "failed to allocate port representor action");
2742 : 0 : goto err;
2743 : : }
2744 : 0 : acts->rule_acts[dr_pos].action = dr_action;
2745 : 0 : break;
2746 : 0 : case RTE_FLOW_ACTION_TYPE_FLAG:
2747 : 0 : dr_action = mlx5_hws_global_action_tag_get(priv, type, is_root);
2748 [ # # ]: 0 : if (dr_action == NULL) {
2749 : 0 : DRV_LOG(ERR, "port %u failed to allocate flag action",
2750 : : priv->dev_data->port_id);
2751 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
2752 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
2753 : : "failed to allocate flag action");
2754 : 0 : goto err;
2755 : : }
2756 : 0 : acts->mark = true;
2757 : 0 : acts->rule_acts[dr_pos].tag.value =
2758 : : mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
2759 : 0 : acts->rule_acts[dr_pos].action = dr_action;
2760 : 0 : rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
2761 : : rte_memory_order_relaxed);
2762 : 0 : mlx5_flow_hw_rxq_flag_set(dev, true);
2763 : 0 : break;
2764 : 0 : case RTE_FLOW_ACTION_TYPE_MARK:
2765 : 0 : dr_action = mlx5_hws_global_action_tag_get(priv, type, is_root);
2766 [ # # ]: 0 : if (dr_action == NULL) {
2767 : 0 : DRV_LOG(ERR, "port %u failed to allocate mark action",
2768 : : priv->dev_data->port_id);
2769 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
2770 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
2771 : : "failed to allocate mark action");
2772 : 0 : goto err;
2773 : : }
2774 : 0 : acts->mark = true;
2775 [ # # ]: 0 : if (masks->conf &&
2776 : : ((const struct rte_flow_action_mark *)
2777 [ # # ]: 0 : masks->conf)->id)
2778 : 0 : acts->rule_acts[dr_pos].tag.value =
2779 : 0 : mlx5_flow_mark_set
2780 : : (((const struct rte_flow_action_mark *)
2781 : 0 : (actions->conf))->id);
2782 : 0 : else if (__flow_hw_act_data_general_append(priv, acts,
2783 : : actions->type,
2784 : : src_pos, dr_pos))
2785 : 0 : goto err;
2786 : 0 : acts->rule_acts[dr_pos].action = dr_action;
2787 : 0 : rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
2788 : : rte_memory_order_relaxed);
2789 : 0 : mlx5_flow_hw_rxq_flag_set(dev, true);
2790 : 0 : break;
2791 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
2792 : 0 : dr_action = mlx5_hws_global_action_push_vlan_get(priv, type, is_root);
2793 [ # # ]: 0 : if (dr_action == NULL) {
2794 : 0 : DRV_LOG(ERR, "port %u failed to allocate push VLAN action",
2795 : : priv->dev_data->port_id);
2796 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
2797 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
2798 : : "failed to allocate push VLAN action");
2799 : 0 : goto err;
2800 : : }
2801 : 0 : acts->rule_acts[dr_pos].action = dr_action;
2802 [ # # # # ]: 0 : if (is_template_masked_push_vlan(masks->conf))
2803 : 0 : acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
2804 : 0 : vlan_hdr_to_be32(actions);
2805 : 0 : else if (__flow_hw_act_data_general_append
2806 : : (priv, acts, actions->type,
2807 : : src_pos, dr_pos))
2808 : 0 : goto err;
2809 : : of_vlan_offset = is_of_vlan_pcp_present(actions) ?
2810 [ # # ]: 0 : MLX5_HW_VLAN_PUSH_PCP_IDX :
2811 : : MLX5_HW_VLAN_PUSH_VID_IDX;
2812 : 0 : actions += of_vlan_offset;
2813 : 0 : masks += of_vlan_offset;
2814 : 0 : break;
2815 : 0 : case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
2816 : 0 : dr_action = mlx5_hws_global_action_pop_vlan_get(priv, type, is_root);
2817 [ # # ]: 0 : if (dr_action == NULL) {
2818 : 0 : DRV_LOG(ERR, "port %u failed to allocate pop VLAN action",
2819 : : priv->dev_data->port_id);
2820 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
2821 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
2822 : : "failed to allocate pop VLAN action");
2823 : 0 : goto err;
2824 : : }
2825 : 0 : acts->rule_acts[dr_pos].action = dr_action;
2826 : 0 : break;
2827 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
2828 [ # # ]: 0 : if (masks->conf &&
2829 : : ((const struct rte_flow_action_jump *)
2830 [ # # ]: 0 : masks->conf)->group) {
2831 : 0 : uint32_t jump_group =
2832 : : ((const struct rte_flow_action_jump *)
2833 : 0 : actions->conf)->group;
2834 : 0 : acts->jump = flow_hw_jump_action_register
2835 : : (dev, cfg, jump_group, &sub_error);
2836 [ # # ]: 0 : if (!acts->jump)
2837 : 0 : goto err;
2838 : 0 : acts->rule_acts[dr_pos].action = (!!attr->group) ?
2839 [ # # ]: 0 : acts->jump->hws_action :
2840 : : acts->jump->root_action;
2841 : : } else if (__flow_hw_act_data_general_append
2842 : : (priv, acts, actions->type,
2843 : : src_pos, dr_pos)){
2844 : 0 : goto err;
2845 : : }
2846 : : break;
2847 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
2848 [ # # ]: 0 : if (masks->conf &&
2849 : : ((const struct rte_flow_action_queue *)
2850 [ # # ]: 0 : masks->conf)->index) {
2851 : 0 : acts->tir = flow_hw_tir_action_register
2852 : 0 : (dev, mlx5_hw_act_flag[!!attr->group][type],
2853 : : actions);
2854 [ # # ]: 0 : if (!acts->tir)
2855 : 0 : goto err;
2856 : 0 : acts->rule_acts[dr_pos].action =
2857 : 0 : acts->tir->action;
2858 : : } else if (__flow_hw_act_data_general_append
2859 : : (priv, acts, actions->type,
2860 : : src_pos, dr_pos)) {
2861 : 0 : goto err;
2862 : : }
2863 : : break;
2864 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
2865 [ # # # # ]: 0 : if (actions->conf && masks->conf) {
2866 : 0 : acts->tir = flow_hw_tir_action_register
2867 : 0 : (dev, mlx5_hw_act_flag[!!attr->group][type],
2868 : : actions);
2869 [ # # ]: 0 : if (!acts->tir)
2870 : 0 : goto err;
2871 : 0 : acts->rule_acts[dr_pos].action =
2872 : 0 : acts->tir->action;
2873 : : } else if (__flow_hw_act_data_general_append
2874 : : (priv, acts, actions->type,
2875 : : src_pos, dr_pos)) {
2876 : 0 : goto err;
2877 : : }
2878 : : break;
2879 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2880 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2881 : : MLX5_ASSERT(!reformat_used);
2882 : 0 : ret = translate_l2_encap_action(actions, masks, converted_encap_data,
2883 : : &data_size, error);
2884 [ # # ]: 0 : if (ret < 0)
2885 : 0 : goto err;
2886 : : /* If masked action, then use converted encap data for shared action. */
2887 [ # # ]: 0 : if (ret > 0) {
2888 : : encap_data = converted_encap_data;
2889 : : encap_data_m = converted_encap_data;
2890 : : }
2891 : : reformat_used = true;
2892 : : reformat_src = src_pos;
2893 : : refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2894 : : break;
2895 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2896 : 0 : raw_encap_data =
2897 : : (const struct rte_flow_action_raw_encap *)
2898 : : masks->conf;
2899 [ # # ]: 0 : if (raw_encap_data)
2900 : 0 : encap_data_m = raw_encap_data->data;
2901 : 0 : raw_encap_data =
2902 : : (const struct rte_flow_action_raw_encap *)
2903 : : actions->conf;
2904 : 0 : encap_data = raw_encap_data->data;
2905 : 0 : data_size = raw_encap_data->size;
2906 [ # # ]: 0 : if (reformat_used) {
2907 : : refmt_type = data_size <
2908 : : MLX5_ENCAPSULATION_DECISION_SIZE ?
2909 [ # # ]: 0 : MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
2910 : : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
2911 : : } else {
2912 : : reformat_used = true;
2913 : : refmt_type =
2914 : : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
2915 : : }
2916 : : reformat_src = src_pos;
2917 : : break;
2918 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2919 : : case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2920 : : MLX5_ASSERT(!reformat_used);
2921 : : reformat_used = true;
2922 : : refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2923 : 0 : break;
2924 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2925 : : reformat_used = true;
2926 : : refmt_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
2927 : 0 : break;
2928 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
2929 [ # # ]: 0 : if (!hca_attr->flex.query_match_sample_info ||
2930 : 0 : !hca_attr->flex.parse_graph_anchor ||
2931 [ # # ]: 0 : !priv->sh->srh_flex_parser.flex.mapnum) {
2932 : 0 : DRV_LOG(ERR, "SRv6 anchor is not supported.");
2933 : 0 : goto err;
2934 : : }
2935 : : MLX5_ASSERT(!recom_used && !recom_type);
2936 : : recom_used = true;
2937 : : recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
2938 : 0 : ipv6_ext_data =
2939 : : (const struct rte_flow_action_ipv6_ext_push *)masks->conf;
2940 [ # # ]: 0 : if (ipv6_ext_data)
2941 : 0 : push_data_m = ipv6_ext_data->data;
2942 : 0 : ipv6_ext_data =
2943 : : (const struct rte_flow_action_ipv6_ext_push *)actions->conf;
2944 [ # # ]: 0 : if (ipv6_ext_data) {
2945 : 0 : push_data = ipv6_ext_data->data;
2946 : 0 : push_size = ipv6_ext_data->size;
2947 : : }
2948 : : recom_src = src_pos;
2949 : : break;
2950 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
2951 [ # # ]: 0 : if (!hca_attr->flex.query_match_sample_info ||
2952 : 0 : !hca_attr->flex.parse_graph_anchor ||
2953 [ # # ]: 0 : !priv->sh->srh_flex_parser.flex.mapnum) {
2954 : 0 : DRV_LOG(ERR, "SRv6 anchor is not supported.");
2955 : 0 : goto err;
2956 : : }
2957 : : recom_used = true;
2958 : : recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
2959 : : break;
2960 : 0 : case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
2961 [ # # ]: 0 : if (is_root) {
2962 : 0 : rte_flow_error_set(&sub_error, ENOTSUP,
2963 : : RTE_FLOW_ERROR_TYPE_ACTION,
2964 : : NULL,
2965 : : "Send to kernel action on root table is not supported in HW steering mode");
2966 : 0 : goto err;
2967 : : }
2968 : 0 : dr_action = mlx5_hws_global_action_send_to_kernel_get(priv,
2969 : : type,
2970 : : MLX5_HW_LOWEST_PRIO_ROOT);
2971 [ # # ]: 0 : if (dr_action == NULL) {
2972 : 0 : DRV_LOG(ERR, "port %u failed to allocate send to kernel action",
2973 : : priv->dev_data->port_id);
2974 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
2975 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
2976 : : "failed to allocate send to kernel action");
2977 : 0 : goto err;
2978 : : }
2979 : 0 : acts->rule_acts[dr_pos].action = dr_action;
2980 : 0 : break;
2981 [ # # ]: 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
2982 : : err = flow_hw_modify_field_compile(dev, attr, actions,
2983 : : masks, acts, &mhdr,
2984 : : src_pos, &sub_error);
2985 [ # # ]: 0 : if (err)
2986 : 0 : goto err;
2987 : : break;
2988 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
2989 [ # # ]: 0 : if (flow_hw_represented_port_compile
2990 : : (dev, attr, actions,
2991 : : masks, acts, src_pos, dr_pos, &sub_error))
2992 : 0 : goto err;
2993 : : break;
2994 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
2995 [ # # ]: 0 : if (is_root && !mlx5dr_action_counter_root_is_supported()) {
2996 : 0 : rte_flow_error_set(&sub_error, ENOTSUP,
2997 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2998 : : "Age action is not supported on group 0");
2999 : 0 : goto err;
3000 : : }
3001 : : if (__flow_hw_act_data_general_append(priv, acts,
3002 : : actions->type,
3003 : : src_pos,
3004 : : dr_pos))
3005 : 0 : goto err;
3006 : : break;
3007 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
3008 [ # # ]: 0 : if (is_root && !mlx5dr_action_counter_root_is_supported()) {
3009 : 0 : rte_flow_error_set(&sub_error, ENOTSUP,
3010 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3011 : : "Count action is not supported on root table");
3012 : 0 : goto err;
3013 : : }
3014 [ # # ]: 0 : if ((at->action_flags & MLX5_FLOW_ACTION_AGE) ||
3015 : : (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
3016 : : /*
3017 : : * When both COUNT and AGE are requested, it is
3018 : : * saved as AGE action which creates also the
3019 : : * counter.
3020 : : */
3021 : : break;
3022 [ # # ]: 0 : if (masks->conf &&
3023 : : ((const struct rte_flow_action_count *)
3024 [ # # ]: 0 : masks->conf)->id) {
3025 [ # # ]: 0 : err = flow_hw_cnt_compile(dev, dr_pos, acts, is_root);
3026 : : if (err)
3027 : 0 : goto err;
3028 : : } else if (__flow_hw_act_data_general_append
3029 : : (priv, acts, actions->type,
3030 : : src_pos, dr_pos)) {
3031 : 0 : goto err;
3032 : : }
3033 : : break;
3034 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3035 [ # # ]: 0 : if (masks->conf) {
3036 : 0 : ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(actions->conf);
3037 : : if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
3038 : 0 : &acts->rule_acts[dr_pos]))
3039 : 0 : goto err;
3040 : : } else if (__flow_hw_act_data_general_append
3041 : : (priv, acts, actions->type,
3042 : : src_pos, dr_pos)) {
3043 : 0 : goto err;
3044 : : }
3045 : : break;
3046 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
3047 [ # # # # ]: 0 : if (actions->conf && masks->conf &&
3048 : : ((const struct rte_flow_action_meter_mark *)
3049 [ # # ]: 0 : masks->conf)->profile) {
3050 : : err = flow_hw_meter_mark_compile(dev,
3051 : : dr_pos, actions,
3052 : 0 : acts->rule_acts,
3053 : : &acts->mtr_id,
3054 : : MLX5_HW_INV_QUEUE,
3055 : : &sub_error);
3056 : : if (err)
3057 : 0 : goto err;
3058 : : } else if (__flow_hw_act_data_general_append(priv, acts,
3059 : : actions->type,
3060 : : src_pos,
3061 : : dr_pos))
3062 : 0 : goto err;
3063 : : break;
3064 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
3065 : 0 : dr_action = mlx5_hws_global_action_def_miss_get(priv, type, is_root);
3066 [ # # ]: 0 : if (dr_action == NULL) {
3067 : 0 : DRV_LOG(ERR, "port %u failed to allocate default miss action",
3068 : : priv->dev_data->port_id);
3069 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
3070 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
3071 : : "failed to allocate default miss action");
3072 : 0 : goto err;
3073 : : }
3074 : 0 : acts->rule_acts[dr_pos].action = dr_action;
3075 : 0 : break;
3076 : 0 : case RTE_FLOW_ACTION_TYPE_NAT64:
3077 [ # # ]: 0 : if (masks->conf &&
3078 [ # # ]: 0 : ((const struct rte_flow_action_nat64 *)masks->conf)->type) {
3079 : 0 : const struct rte_flow_action_nat64 *nat64_c =
3080 : : (const struct rte_flow_action_nat64 *)actions->conf;
3081 : 0 : dr_action = mlx5_hws_global_action_nat64_get(priv,
3082 : : type,
3083 : 0 : nat64_c->type);
3084 [ # # ]: 0 : if (dr_action == NULL) {
3085 : 0 : DRV_LOG(ERR, "port %u failed to allocate NAT64 action",
3086 : : priv->dev_data->port_id);
3087 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
3088 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
3089 : : "failed to allocate NAT64 action");
3090 : 0 : goto err;
3091 : : }
3092 : 0 : acts->rule_acts[dr_pos].action = dr_action;
3093 : 0 : break;
3094 : : }
3095 : 0 : acts->nat64[RTE_FLOW_NAT64_6TO4] = mlx5_hws_global_action_nat64_get(priv,
3096 : : type,
3097 : : RTE_FLOW_NAT64_6TO4);
3098 : 0 : acts->nat64[RTE_FLOW_NAT64_4TO6] = mlx5_hws_global_action_nat64_get(priv,
3099 : : type,
3100 : : RTE_FLOW_NAT64_4TO6);
3101 [ # # # # ]: 0 : if (!acts->nat64[RTE_FLOW_NAT64_6TO4] ||
3102 : : !acts->nat64[RTE_FLOW_NAT64_4TO6]) {
3103 : 0 : DRV_LOG(ERR, "port %u failed to allocate both NAT64 actions",
3104 : : priv->dev_data->port_id);
3105 : 0 : rte_flow_error_set(&sub_error, ENOMEM,
3106 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
3107 : : "failed to allocate both NAT64 actions");
3108 : 0 : goto err;
3109 : : }
3110 : 0 : if (__flow_hw_act_data_general_append(priv, acts,
3111 : : actions->type,
3112 : : src_pos, dr_pos))
3113 : 0 : goto err;
3114 : : break;
3115 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
3116 [ # # ]: 0 : if (masks->conf &&
3117 : : ((const struct rte_flow_action_jump_to_table_index *)
3118 [ # # ]: 0 : masks->conf)->table) {
3119 : 0 : struct rte_flow_template_table *jump_table =
3120 : : ((const struct rte_flow_action_jump_to_table_index *)
3121 : 0 : actions->conf)->table;
3122 : 0 : acts->rule_acts[dr_pos].jump_to_matcher.offset =
3123 : : ((const struct rte_flow_action_jump_to_table_index *)
3124 : 0 : actions->conf)->index;
3125 [ # # ]: 0 : if (likely(!rte_flow_template_table_resizable(dev->data->port_id,
3126 : : &jump_table->cfg.attr))) {
3127 : 0 : acts->rule_acts[dr_pos].action =
3128 : 0 : jump_table->matcher_info[0].jump;
3129 : : } else {
3130 : : uint32_t selector;
3131 : 0 : rte_rwlock_read_lock(&jump_table->matcher_replace_rwlk);
3132 : 0 : selector = jump_table->matcher_selector;
3133 : 0 : acts->rule_acts[dr_pos].action =
3134 : 0 : jump_table->matcher_info[selector].jump;
3135 : 0 : rte_rwlock_read_unlock(&jump_table->matcher_replace_rwlk);
3136 : : }
3137 : : } else if (__flow_hw_act_data_general_append
3138 : : (priv, acts, actions->type,
3139 : : src_pos, dr_pos)){
3140 : 0 : goto err;
3141 : : }
3142 : : break;
3143 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_MIRROR:
3144 : : if (__flow_hw_act_data_general_append(priv, acts,
3145 : : actions->type,
3146 : : src_pos, dr_pos))
3147 : 0 : goto err;
3148 : : break;
3149 : 0 : case RTE_FLOW_ACTION_TYPE_END:
3150 : : actions_end = true;
3151 : 0 : break;
3152 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_ID:
3153 : 0 : DRV_LOG(ERR, "RTE_FLOW_ACTION_TYPE_PORT_ID action is not supported. "
3154 : : "Use RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT instead.");
3155 : 0 : goto err;
3156 : : default:
3157 : : break;
3158 : : }
3159 : : }
3160 [ # # ]: 0 : if (mhdr.pos != UINT16_MAX) {
3161 : 0 : ret = mlx5_tbl_translate_modify_header(dev, cfg, acts, mp_ctx, &mhdr, &sub_error);
3162 [ # # ]: 0 : if (ret)
3163 : 0 : goto err;
3164 [ # # # # ]: 0 : if (!nt_mode && mhdr.shared) {
3165 : 0 : ret = mlx5_tbl_ensure_shared_modify_header(dev, cfg, acts, &sub_error);
3166 [ # # ]: 0 : if (ret)
3167 : 0 : goto err;
3168 : : }
3169 : : }
3170 [ # # ]: 0 : if (reformat_used) {
3171 : 0 : ret = mlx5_tbl_translate_reformat(priv, acts, at,
3172 : : encap_data, encap_data_m,
3173 : : mp_ctx, data_size,
3174 : : reformat_src,
3175 : : refmt_type, &sub_error);
3176 [ # # ]: 0 : if (ret)
3177 : 0 : goto err;
3178 [ # # # # ]: 0 : if (!nt_mode && acts->encap_decap->shared) {
3179 : 0 : ret = mlx5_tbl_create_reformat_action(priv, table_attr, acts, at,
3180 : : encap_data, data_size,
3181 : : refmt_type);
3182 [ # # ]: 0 : if (ret)
3183 : 0 : goto err;
3184 : : }
3185 : : }
3186 [ # # ]: 0 : if (recom_used) {
3187 : : MLX5_ASSERT(at->recom_off != UINT16_MAX);
3188 : 0 : ret = mlx5_create_ipv6_ext_reformat(dev, cfg, acts, at, push_data,
3189 : : push_data_m, push_size, recom_src,
3190 : : recom_type);
3191 [ # # ]: 0 : if (ret)
3192 : 0 : goto err;
3193 : : }
3194 : : return 0;
3195 : 0 : err:
3196 : : /* If rte_errno was not initialized and reached error state. */
3197 [ # # ]: 0 : if (!rte_errno)
3198 : 0 : rte_errno = EINVAL;
3199 : 0 : err = rte_errno;
3200 : 0 : __flow_hw_action_template_destroy(dev, acts);
3201 [ # # # # ]: 0 : if (error != NULL && sub_error.type != RTE_FLOW_ERROR_TYPE_NONE) {
3202 : : rte_memcpy(error, &sub_error, sizeof(sub_error));
3203 : 0 : return -EINVAL;
3204 : : }
3205 : 0 : return rte_flow_error_set(error, err,
3206 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3207 : : "fail to create rte table");
3208 : : }
3209 : :
3210 : : /**
3211 : : * Translate rte_flow actions to DR action.
3212 : : *
3213 : : * As the action template has already indicated the actions. Translate
3214 : : * the rte_flow actions to DR action if possible. So in flow create
3215 : : * stage we will save cycles from handing the actions' organizing.
3216 : : * For the actions with limited information, need to add these to a
3217 : : * list.
3218 : : *
3219 : : * @param[in] dev
3220 : : * Pointer to the rte_eth_dev structure.
3221 : : * @param[in] cfg
3222 : : * Pointer to the table configuration.
3223 : : * @param[in/out] acts
3224 : : * Pointer to the template HW steering DR actions.
3225 : : * @param[in] at
3226 : : * Action template.
3227 : : * @param[out] error
3228 : : * Pointer to error structure.
3229 : : *
3230 : : * @return
3231 : : * 0 on success, a negative errno otherwise and rte_errno is set.
3232 : : */
3233 : : static int
3234 : : flow_hw_translate_actions_template(struct rte_eth_dev *dev,
3235 : : const struct mlx5_flow_template_table_cfg *cfg,
3236 : : struct mlx5_hw_actions *acts,
3237 : : struct rte_flow_actions_template *at,
3238 : : struct mlx5_tbl_multi_pattern_ctx *mp_ctx,
3239 : : struct rte_flow_error *error)
3240 : : {
3241 : 0 : return __flow_hw_translate_actions_template(dev, cfg, acts, at, mp_ctx, false, error);
3242 : : }
3243 : :
3244 : : static __rte_always_inline struct mlx5dr_rule_action *
3245 : : flow_hw_get_dr_action_buffer(struct mlx5_priv *priv,
3246 : : struct rte_flow_template_table *table,
3247 : : uint8_t action_template_index,
3248 : : uint32_t queue)
3249 : : {
3250 : 0 : uint32_t offset = action_template_index * priv->nb_queue + queue;
3251 : :
3252 : 0 : return &table->rule_acts[offset].acts[0];
3253 : : }
3254 : :
3255 : : static void
3256 : 0 : flow_hw_populate_rule_acts_caches(struct rte_eth_dev *dev,
3257 : : struct rte_flow_template_table *table,
3258 : : uint8_t at_idx)
3259 : : {
3260 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3261 : : uint32_t q;
3262 : :
3263 [ # # ]: 0 : for (q = 0; q < priv->nb_queue; ++q) {
3264 : : struct mlx5dr_rule_action *rule_acts =
3265 : 0 : flow_hw_get_dr_action_buffer(priv, table, at_idx, q);
3266 : :
3267 [ # # ]: 0 : rte_memcpy(rule_acts, table->ats[at_idx].acts.rule_acts,
3268 : : sizeof(table->ats[at_idx].acts.rule_acts));
3269 : : }
3270 : 0 : }
3271 : :
3272 : : /**
3273 : : * Translate rte_flow actions to DR action.
3274 : : *
3275 : : * @param[in] dev
3276 : : * Pointer to the rte_eth_dev structure.
3277 : : * @param[in] tbl
3278 : : * Pointer to the flow template table.
3279 : : * @param[out] error
3280 : : * Pointer to error structure.
3281 : : *
3282 : : * @return
3283 : : * 0 on success, negative value otherwise and rte_errno is set.
3284 : : */
3285 : : static int
3286 : 0 : flow_hw_translate_all_actions_templates(struct rte_eth_dev *dev,
3287 : : struct rte_flow_template_table *tbl,
3288 : : struct rte_flow_error *error)
3289 : : {
3290 : : int ret;
3291 : : uint32_t i;
3292 : :
3293 [ # # ]: 0 : for (i = 0; i < tbl->nb_action_templates; i++) {
3294 [ # # ]: 0 : if (flow_hw_translate_actions_template(dev, &tbl->cfg,
3295 : : &tbl->ats[i].acts,
3296 : : tbl->ats[i].action_template,
3297 : : &tbl->mpctx, error))
3298 : 0 : goto err;
3299 : 0 : flow_hw_populate_rule_acts_caches(dev, tbl, i);
3300 : : }
3301 [ # # ]: 0 : ret = mlx5_tbl_multi_pattern_process(dev, tbl, &tbl->mpctx.segments[0],
3302 : : rte_log2_u32(tbl->cfg.attr.nb_flows),
3303 : : error);
3304 [ # # ]: 0 : if (ret)
3305 : 0 : goto err;
3306 : : return 0;
3307 : : err:
3308 [ # # ]: 0 : while (i--)
3309 : 0 : __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
3310 : : return -1;
3311 : : }
3312 : :
3313 : : /**
3314 : : * Get shared indirect action.
3315 : : *
3316 : : * @param[in] dev
3317 : : * Pointer to the rte_eth_dev data structure.
3318 : : * @param[in] act_data
3319 : : * Pointer to the recorded action construct data.
3320 : : * @param[in] item_flags
3321 : : * The matcher itme_flags used for RSS lookup.
3322 : : * @param[in] rule_act
3323 : : * Pointer to the shared action's destination rule DR action.
3324 : : *
3325 : : * @return
3326 : : * 0 on success, negative value otherwise and rte_errno is set.
3327 : : */
3328 : : static __rte_always_inline int
3329 : : flow_hw_shared_action_get(struct rte_eth_dev *dev,
3330 : : struct mlx5_action_construct_data *act_data,
3331 : : const uint64_t item_flags,
3332 : : struct mlx5dr_rule_action *rule_act)
3333 : : {
3334 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3335 : 0 : struct mlx5_flow_rss_desc rss_desc = { 0 };
3336 : 0 : uint64_t hash_fields = 0;
3337 : : uint32_t hrxq_idx = 0;
3338 : : struct mlx5_hrxq *hrxq = NULL;
3339 : : int act_type = act_data->type;
3340 : :
3341 : : switch (act_type) {
3342 : : case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3343 : 0 : rss_desc.level = act_data->shared_rss.level;
3344 : 0 : rss_desc.types = act_data->shared_rss.types;
3345 : 0 : rss_desc.symmetric_hash_function = act_data->shared_rss.symmetric_hash_function;
3346 : 0 : mlx5_flow_dv_hashfields_set(item_flags, &rss_desc, &hash_fields);
3347 : 0 : hrxq_idx = mlx5_flow_dv_action_rss_hrxq_lookup
3348 : : (dev, act_data->shared_rss.idx, hash_fields);
3349 [ # # # # : 0 : if (hrxq_idx)
# # # # #
# # # # #
# # # # #
# ]
3350 : 0 : hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
3351 : : hrxq_idx);
3352 [ # # # # : 0 : if (hrxq) {
# # # # #
# # # # #
# # # # #
# ]
3353 : 0 : rule_act->action = hrxq->action;
3354 : : return 0;
3355 : : }
3356 : : break;
3357 : : default:
3358 : : DRV_LOG(WARNING, "Unsupported shared action type:%d",
3359 : : act_data->type);
3360 : : break;
3361 : : }
3362 : : return -1;
3363 : : }
3364 : :
3365 : : static void
3366 : 0 : flow_hw_construct_quota(struct mlx5_priv *priv,
3367 : : struct mlx5dr_rule_action *rule_act, uint32_t qid)
3368 : : {
3369 : 0 : rule_act->action = priv->quota_ctx.dr_action;
3370 : 0 : rule_act->aso_meter.offset = qid - 1;
3371 : 0 : rule_act->aso_meter.init_color =
3372 : : MLX5DR_ACTION_ASO_METER_COLOR_GREEN;
3373 : 0 : }
3374 : :
/**
 * Construct shared indirect action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev data structure.
 * @param[in] queue
 *   The flow creation queue index.
 * @param[in] action
 *   Pointer to the shared indirect rte_flow action. Its conf field is not
 *   a pointer but an encoded indirect action handle (type + object index).
 * @param[in] table
 *   Pointer to the flow table.
 * @param[in] item_flags
 *   Item flags.
 * @param[in] action_flags
 *   Actions bit-map detected in this template.
 * @param[in, out] flow
 *   Pointer to the flow containing the counter.
 * @param[in] rule_act
 *   Pointer to the shared action's destination rule DR action.
 *
 * @return
 *   0 on success, negative value otherwise and rte_errno is set.
 */
static __rte_always_inline int
flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
				const struct rte_flow_action *action,
				struct rte_flow_template_table *table,
				const uint64_t item_flags, uint64_t action_flags,
				struct rte_flow_hw *flow,
				struct mlx5dr_rule_action *rule_act)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
	struct mlx5_action_construct_data act_data;
	struct mlx5_shared_action_rss *shared_rss;
	struct mlx5_aso_mtr *aso_mtr;
	struct mlx5_age_info *age_info;
	struct mlx5_hws_age_param *param;
	struct rte_flow_hw_aux *aux;
	/* The handle encodes the action type in its high bits ... */
	uint32_t act_idx = (uint32_t)(uintptr_t)action->conf;
	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
	/* ... and the object index in the remaining low bits. */
	uint32_t idx = act_idx &
		       ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
	uint32_t *cnt_queue;
	cnt_id_t age_cnt;
	bool is_root = mlx5_group_id_is_root(table->grp->group_id);

	memset(&act_data, 0, sizeof(act_data));
	switch (type) {
	case MLX5_INDIRECT_ACTION_TYPE_RSS:
		act_data.type = MLX5_RTE_FLOW_ACTION_TYPE_RSS;
		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
		if (!shared_rss)
			return -1;
		act_data.shared_rss.idx = idx;
		act_data.shared_rss.level = shared_rss->origin.level;
		/* An empty types mask defaults to IP RSS. */
		act_data.shared_rss.types = !shared_rss->origin.types ?
					    RTE_ETH_RSS_IP :
					    shared_rss->origin.types;
		act_data.shared_rss.symmetric_hash_function =
			MLX5_RSS_IS_SYMM(shared_rss->origin.func);

		if (flow_hw_shared_action_get
				(dev, &act_data, item_flags, rule_act))
			return -1;
		break;
	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
							act_idx,
							&rule_act->action,
							&rule_act->counter.offset,
							is_root))
			return -1;
		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
		flow->cnt_id = act_idx;
		break;
	case MLX5_INDIRECT_ACTION_TYPE_AGE:
		aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
		/*
		 * Save the index with the indirect type, to recognize
		 * it in flow destroy.
		 */
		mlx5_flow_hw_aux_set_age_idx(flow, aux, act_idx);
		flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
		if (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
			/*
			 * The mutual update for indirect AGE & COUNT will be
			 * performed later after we have ID for both of them.
			 */
			break;
		age_info = GET_PORT_AGE_INFO(priv);
		param = mlx5_ipool_get(age_info->ages_ipool, idx);
		if (param == NULL)
			return -1;
		if (action_flags & MLX5_FLOW_ACTION_COUNT) {
			/* A regular COUNT action accompanies the AGE:
			 * allocate a counter tied to this AGE parameter.
			 */
			cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
			if (mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &age_cnt, idx, 0) < 0)
				return -1;
			flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
			flow->cnt_id = age_cnt;
			param->nb_cnts++;
		} else {
			/*
			 * Get the counter of this indirect AGE or create one
			 * if doesn't exist.
			 */
			age_cnt = mlx5_hws_age_cnt_get(priv, param, idx);
			if (age_cnt == 0)
				return -1;
		}
		if (mlx5_hws_cnt_pool_get_action_offset(priv->hws_cpool,
							age_cnt, &rule_act->action,
							&rule_act->counter.offset, is_root))
			return -1;
		break;
	case MLX5_INDIRECT_ACTION_TYPE_CT:
		if (flow_hw_ct_compile(dev, queue, idx, rule_act))
			return -1;
		break;
	case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
		/* Find ASO object. */
		aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
		if (!aso_mtr)
			return -1;
		rule_act->action = pool->action;
		rule_act->aso_meter.offset = aso_mtr->offset;
		break;
	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
		flow_hw_construct_quota(priv, rule_act, idx);
		break;
	default:
		/* Unknown types are logged but do not fail the rule. */
		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
		break;
	}
	return 0;
}
3512 : :
3513 : : static __rte_always_inline int
3514 : : flow_hw_mhdr_cmd_is_nop(const struct mlx5_modification_cmd *cmd)
3515 : : {
3516 : : struct mlx5_modification_cmd cmd_he = {
3517 : 0 : .data0 = rte_be_to_cpu_32(cmd->data0),
3518 : : .data1 = 0,
3519 : : };
3520 : :
3521 : 0 : return cmd_he.action_type == MLX5_MODIFICATION_TYPE_NOP;
3522 : : }
3523 : :
/**
 * Fill in the per-flow data of a non-shared MODIFY_FIELD action.
 *
 * Copies the action's source (immediate value or pointed-to buffer) into
 * the flow's modify header command array, which was pre-built at template
 * translation time. Several destination fields need byte-order or
 * bit-position fixups before the data is written.
 *
 * @param[in] mhdr_cmd
 *   Array of modify header commands to update for this flow.
 * @param[in] act_data
 *   Pointer to the recorded action construct data (command range, field
 *   list, and mask for this action).
 * @param[in] hw_acts
 *   Pointer to translated actions from template.
 * @param[in] action
 *   The MODIFY_FIELD action supplied with the flow rule.
 *
 * @return
 *   0 on success, negative value otherwise.
 */
static __rte_always_inline int
flow_hw_modify_field_construct(struct mlx5_modification_cmd *mhdr_cmd,
			       struct mlx5_action_construct_data *act_data,
			       const struct mlx5_hw_actions *hw_acts,
			       const struct rte_flow_action *action)
{
	const struct rte_flow_action_modify_field *mhdr_action = action->conf;
	uint8_t values[16] = { 0 };
	unaligned_uint32_t *value_p;
	uint32_t i;
	struct field_modify_info *field;

	if (!hw_acts->mhdr)
		return -1;
	/* Shared modify header commands are already fully populated. */
	if (hw_acts->mhdr->shared || act_data->modify_header.shared)
		return 0;
	MLX5_ASSERT(mhdr_action->operation == RTE_FLOW_MODIFY_SET ||
		    mhdr_action->operation == RTE_FLOW_MODIFY_ADD);
	/* Only immediate/pointer sources carry per-flow data to copy. */
	if (mhdr_action->src.field != RTE_FLOW_FIELD_VALUE &&
	    mhdr_action->src.field != RTE_FLOW_FIELD_POINTER)
		return 0;
	if (mhdr_action->src.field == RTE_FLOW_FIELD_VALUE)
		rte_memcpy(values, &mhdr_action->src.value, sizeof(values));
	else
		rte_memcpy(values, mhdr_action->src.pvalue, sizeof(values));
	if (mhdr_action->dst.field == RTE_FLOW_FIELD_META ||
	    mhdr_action->dst.field == RTE_FLOW_FIELD_TAG ||
	    mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
	    mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
		uint8_t tag_index = flow_tag_index_get(&mhdr_action->dst);

		value_p = (unaligned_uint32_t *)values;
		/* The linear hash TAG register keeps data in the upper half. */
		if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
		    tag_index == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
			*value_p = rte_cpu_to_be_32(*value_p << 16);
		else
			*value_p = rte_cpu_to_be_32(*value_p);
	} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI ||
		   mhdr_action->dst.field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE) {
		uint32_t tmp;

		/*
		 * Both QFI and Geneve option type are passed as an uint8_t integer,
		 * but it is accessed through a 2nd least significant byte of a 32-bit
		 * field in modify header command.
		 */
		tmp = values[0];
		value_p = (unaligned_uint32_t *)values;
		*value_p = rte_cpu_to_be_32(tmp << 8);
	}
	/* Walk this action's slice of the command array, skipping NOPs
	 * and zero-mask fields, writing one data word per real command.
	 */
	i = act_data->modify_header.mhdr_cmds_off;
	field = act_data->modify_header.field;
	do {
		uint32_t off_b;
		uint32_t mask;
		uint32_t data;
		const uint8_t *mask_src;

		if (i >= act_data->modify_header.mhdr_cmds_end)
			return -1;
		if (flow_hw_mhdr_cmd_is_nop(&mhdr_cmd[i])) {
			++i;
			continue;
		}
		mask_src = (const uint8_t *)act_data->modify_header.mask;
		mask = flow_dv_fetch_field(mask_src + field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		off_b = rte_bsf32(mask);
		data = flow_dv_fetch_field(values + field->offset, field->size);
		/*
		 * IPv6 DSCP uses OUT_IPV6_TRAFFIC_CLASS as ID but it starts from 2
		 * bits left. Shift the data left for IPv6 DSCP
		 */
		if (field->id == MLX5_MODI_OUT_IPV6_TRAFFIC_CLASS &&
		    mhdr_action->dst.field == RTE_FLOW_FIELD_IPV6_DSCP)
			data <<= MLX5_IPV6_HDR_DSCP_SHIFT;
		data = (data & mask) >> off_b;
		mhdr_cmd[i++].data1 = rte_cpu_to_be_32(data);
		++field;
	} while (field->size);
	return 0;
}
3633 : :
3634 : : /**
3635 : : * Release any actions allocated for the flow rule during actions construction.
3636 : : *
3637 : : * @param[in] flow
3638 : : * Pointer to flow structure.
3639 : : */
3640 : : static void
3641 : 0 : flow_hw_release_actions(struct rte_eth_dev *dev,
3642 : : uint32_t queue,
3643 : : struct rte_flow_hw *flow)
3644 : : {
3645 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3646 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3647 [ # # ]: 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
3648 : :
3649 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
3650 : 0 : flow_hw_jump_release(dev, flow->jump);
3651 [ # # ]: 0 : else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
3652 : 0 : mlx5_hrxq_obj_release(dev, flow->hrxq);
3653 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
3654 : 0 : flow_hw_age_count_release(priv, queue, flow, NULL);
3655 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
3656 : 0 : mlx5_ipool_free(pool->idx_pool, mlx5_flow_hw_aux_get_mtr_id(flow, aux));
3657 : 0 : }
3658 : :
3659 : : /**
3660 : : * Construct flow action array.
3661 : : *
3662 : : * For action template contains dynamic actions, these actions need to
3663 : : * be updated according to the rte_flow action during flow creation.
3664 : : *
3665 : : * @param[in] dev
3666 : : * Pointer to the rte_eth_dev structure.
3667 : : * @param[in] flow
3668 : : * Pointer to flow structure.
3669 : : * @param[in] ap
3670 : : * Pointer to container for temporarily constructed actions' parameters.
3671 : : * @param[in] hw_acts
3672 : : * Pointer to translated actions from template.
3673 : : * @param[in] items_flags
3674 : : * Item flags.
3675 : : * @param[in] table
3676 : : * Pointer to the template table.
3677 : : * @param[in] actions
3678 : : * Array of rte_flow action need to be checked.
3679 : : * @param[in] rule_acts
3680 : : * Array of DR rule actions to be used during flow creation..
3681 : : * @param[in] acts_num
3682 : : * Pointer to the real acts_num flow has.
3683 : : *
3684 : : * @return
3685 : : * 0 on success, negative value otherwise and rte_errno is set.
3686 : : */
3687 : : static __rte_always_inline int
3688 : : flow_hw_actions_construct(struct rte_eth_dev *dev,
3689 : : struct rte_flow_hw *flow,
3690 : : struct mlx5_flow_hw_action_params *ap,
3691 : : const struct mlx5_hw_action_template *hw_at,
3692 : : uint64_t item_flags,
3693 : : struct rte_flow_template_table *table,
3694 : : const struct rte_flow_action actions[],
3695 : : struct mlx5dr_rule_action *rule_acts,
3696 : : uint32_t queue,
3697 : : struct rte_flow_error *error)
3698 : : {
3699 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3700 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
3701 : : struct mlx5_action_construct_data *act_data;
3702 : 0 : const struct rte_flow_actions_template *at = hw_at->action_template;
3703 : : const struct mlx5_hw_actions *hw_acts = &hw_at->acts;
3704 : : const struct rte_flow_action *action;
3705 : : const struct rte_flow_action_raw_encap *raw_encap_data;
3706 : : const struct rte_flow_action_ipv6_ext_push *ipv6_push;
3707 : : const struct rte_flow_item *enc_item = NULL;
3708 : : const struct rte_flow_action_ethdev *port_action = NULL;
3709 : : const struct rte_flow_action_age *age = NULL;
3710 : : const struct rte_flow_action_nat64 *nat64_c = NULL;
3711 : 0 : struct rte_flow_attr attr = {
3712 : : .ingress = 1,
3713 : : };
3714 : : uint32_t ft_flag;
3715 : : int ret;
3716 : 0 : size_t encap_len = 0;
3717 : : uint32_t age_idx = 0;
3718 : : uint32_t mtr_idx = 0;
3719 : : struct mlx5_aso_mtr *aso_mtr;
3720 : : struct mlx5_multi_pattern_segment *mp_segment = NULL;
3721 : : struct rte_flow_hw_aux *aux;
3722 : 0 : bool is_root = mlx5_group_id_is_root(table->grp->group_id);
3723 : :
3724 : : attr.group = table->grp->group_id;
3725 : 0 : ft_flag = mlx5_hw_act_flag[!!table->grp->group_id][table->type];
3726 : 0 : if (table->type >= MLX5DR_TABLE_TYPE_FDB && table->type < MLX5DR_TABLE_TYPE_MAX) {
3727 : 0 : attr.transfer = 1;
3728 : : attr.ingress = 1;
3729 [ # # # # : 0 : } else if (table->type == MLX5DR_TABLE_TYPE_NIC_TX) {
# # # # #
# ]
3730 : 0 : attr.egress = 1;
3731 : 0 : attr.ingress = 0;
3732 : : } else {
3733 : : attr.ingress = 1;
3734 : : }
3735 [ # # # # : 0 : if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0 && !hw_acts->mhdr->shared) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# ]
3736 : 0 : uint16_t pos = hw_acts->mhdr->pos;
3737 : :
3738 : 0 : mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
3739 [ # # # # : 0 : if (!mp_segment || !mp_segment->mhdr_action)
# # # # #
# # # # #
# # # # #
# ]
3740 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3741 : : NULL, "No modify header action found");
3742 : 0 : rule_acts[pos].action = mp_segment->mhdr_action;
3743 : : /* offset is relative to DR action */
3744 : 0 : rule_acts[pos].modify_header.offset =
3745 : 0 : flow->res_idx - mp_segment->head_index;
3746 : 0 : rule_acts[pos].modify_header.data =
3747 : 0 : (uint8_t *)ap->mhdr_cmd;
3748 : : MLX5_ASSERT(hw_acts->mhdr->mhdr_cmds_num <= MLX5_MHDR_MAX_CMD);
3749 : 0 : rte_memcpy(ap->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
3750 [ # # # # : 0 : sizeof(*ap->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
# # # # #
# ]
3751 : : }
3752 [ # # # # : 0 : LIST_FOREACH(act_data, &hw_acts->act_list, next) {
# # # # #
# ]
3753 : : uint32_t jump_group;
3754 : : uint32_t tag;
3755 : : struct mlx5_hw_jump_action *jump;
3756 : : struct mlx5_hrxq *hrxq;
3757 : : uint32_t ct_idx;
3758 : : cnt_id_t cnt_id;
3759 : : uint32_t *cnt_queue;
3760 : : uint32_t mtr_id;
3761 : : struct rte_flow_template_table *jump_table;
3762 : :
3763 : 0 : action = &actions[act_data->action_src];
3764 : : /*
3765 : : * action template construction replaces
3766 : : * OF_SET_VLAN_VID with MODIFY_FIELD
3767 : : */
3768 : 0 : if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
3769 : : MLX5_ASSERT(act_data->type ==
3770 : : RTE_FLOW_ACTION_TYPE_MODIFY_FIELD);
3771 : : else
3772 : : MLX5_ASSERT(action->type ==
3773 : : RTE_FLOW_ACTION_TYPE_INDIRECT ||
3774 : : (int)action->type == act_data->type);
3775 [ # # # # : 0 : switch ((int)act_data->type) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# ]
3776 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
3777 : 0 : act_data->indirect_list_cb(dev, act_data, action,
3778 : 0 : &rule_acts[act_data->action_dst]);
3779 : 0 : break;
3780 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
3781 : 0 : if (flow_hw_shared_action_construct
3782 : : (dev, queue, action, table,
3783 : 0 : item_flags, at->action_flags, flow,
3784 [ # # # # : 0 : &rule_acts[act_data->action_dst]))
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# ]
3785 : 0 : goto error;
3786 : : break;
3787 : : case RTE_FLOW_ACTION_TYPE_VOID:
3788 : : break;
3789 : 0 : case RTE_FLOW_ACTION_TYPE_MARK:
3790 : 0 : tag = mlx5_flow_mark_set
3791 : : (((const struct rte_flow_action_mark *)
3792 : 0 : (action->conf))->id);
3793 : 0 : rule_acts[act_data->action_dst].tag.value = tag;
3794 : 0 : break;
3795 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3796 : 0 : rule_acts[act_data->action_dst].push_vlan.vlan_hdr =
3797 : 0 : vlan_hdr_to_be32(action);
3798 : 0 : break;
3799 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
3800 : 0 : jump_group = ((const struct rte_flow_action_jump *)
3801 : 0 : action->conf)->group;
3802 : 0 : jump = flow_hw_jump_action_register
3803 : 0 : (dev, &table->cfg, jump_group, NULL);
3804 [ # # # # : 0 : if (!jump)
# # # # #
# ]
3805 : 0 : goto error;
3806 : 0 : rule_acts[act_data->action_dst].action =
3807 [ # # # # : 0 : (!!attr.group) ? jump->hws_action : jump->root_action;
# # # # #
# ]
3808 : 0 : flow->jump = jump;
3809 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP;
3810 : 0 : break;
3811 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
3812 : : case RTE_FLOW_ACTION_TYPE_QUEUE:
3813 : 0 : hrxq = flow_hw_tir_action_register(dev, ft_flag, action);
3814 [ # # # # : 0 : if (!hrxq)
# # # # #
# ]
3815 : 0 : goto error;
3816 : 0 : rule_acts[act_data->action_dst].action = hrxq->action;
3817 : 0 : flow->hrxq = hrxq;
3818 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ;
3819 : 0 : break;
3820 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
3821 : 0 : if (flow_hw_shared_action_get
3822 : : (dev, act_data, item_flags,
3823 : 0 : &rule_acts[act_data->action_dst]))
3824 : 0 : goto error;
3825 : : break;
3826 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3827 : 0 : enc_item = ((const struct rte_flow_action_vxlan_encap *)
3828 : 0 : action->conf)->definition;
3829 [ # # # # : 0 : if (mlx5_flow_dv_convert_encap_data(enc_item, ap->encap_data,
# # # # #
# ]
3830 : : &encap_len, NULL))
3831 : 0 : goto error;
3832 : : break;
3833 : 0 : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3834 : 0 : enc_item = ((const struct rte_flow_action_nvgre_encap *)
3835 : 0 : action->conf)->definition;
3836 [ # # # # : 0 : if (mlx5_flow_dv_convert_encap_data(enc_item, ap->encap_data,
# # # # #
# ]
3837 : : &encap_len, NULL))
3838 : 0 : goto error;
3839 : : break;
3840 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3841 : 0 : raw_encap_data =
3842 : : (const struct rte_flow_action_raw_encap *)
3843 : : action->conf;
3844 : : MLX5_ASSERT(raw_encap_data->size == act_data->encap.len);
3845 [ # # # # : 0 : if (unlikely(act_data->encap.len > MLX5_ENCAP_MAX_LEN))
# # # # #
# ]
3846 : : return -1;
3847 [ # # # # : 0 : rte_memcpy(ap->encap_data, raw_encap_data->data, act_data->encap.len);
# # # # #
# ]
3848 : : break;
3849 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
3850 : 0 : ipv6_push =
3851 : : (const struct rte_flow_action_ipv6_ext_push *)action->conf;
3852 : : MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
3853 [ # # # # : 0 : if (unlikely(act_data->ipv6_ext.len > MLX5_PUSH_MAX_LEN))
# # # # #
# ]
3854 : : return -1;
3855 [ # # # # : 0 : rte_memcpy(ap->ipv6_push_data, ipv6_push->data,
# # # # #
# ]
3856 : : act_data->ipv6_ext.len);
3857 : : break;
3858 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
3859 [ # # # # : 0 : if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
# # # # #
# ]
3860 : 0 : ret = flow_hw_set_vlan_vid_construct(dev, ap->mhdr_cmd,
3861 : : act_data,
3862 : : hw_acts,
3863 : : action);
3864 : : else
3865 [ # # # # : 0 : ret = flow_hw_modify_field_construct(ap->mhdr_cmd,
# # # # #
# ]
3866 : : act_data,
3867 : : hw_acts,
3868 : : action);
3869 [ # # # # : 0 : if (ret)
# # # # #
# ]
3870 : 0 : goto error;
3871 : : break;
3872 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
3873 : 0 : port_action = action->conf;
3874 [ # # # # : 0 : if (!priv->hw_vport[port_action->port_id])
# # # # #
# ]
3875 : 0 : goto error;
3876 : 0 : rule_acts[act_data->action_dst].action =
3877 : : priv->hw_vport[port_action->port_id];
3878 : 0 : break;
3879 : 0 : case RTE_FLOW_ACTION_TYPE_QUOTA:
3880 : 0 : flow_hw_construct_quota(priv,
3881 : 0 : rule_acts + act_data->action_dst,
3882 : : act_data->shared_meter.id);
3883 : 0 : break;
3884 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
3885 [ # # # # : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
# # # # #
# ]
3886 : 0 : age = action->conf;
3887 : : /*
3888 : : * First, create the AGE parameter, then create its
3889 : : * counter later:
3890 : : * Regular counter - in next case.
3891 : : * Indirect counter - update it after the loop.
3892 : : */
3893 : 0 : age_idx = mlx5_hws_age_action_create(priv, queue, 0,
3894 : : age,
3895 : : flow->res_idx,
3896 : : error);
3897 [ # # # # : 0 : if (age_idx == 0)
# # # # #
# ]
3898 : 0 : goto error;
3899 : : mlx5_flow_hw_aux_set_age_idx(flow, aux, age_idx);
3900 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX;
3901 [ # # # # : 0 : if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT)
# # # # #
# ]
3902 : : /*
3903 : : * When AGE uses indirect counter, no need to
3904 : : * create counter but need to update it with the
3905 : : * AGE parameter, will be done after the loop.
3906 : : */
3907 : : break;
3908 : : /* Fall-through. */
3909 : : case RTE_FLOW_ACTION_TYPE_COUNT:
3910 : : cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
3911 : : ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id,
3912 : : age_idx, 0);
3913 [ # # # # : 0 : if (ret != 0) {
# # # # #
# ]
3914 : 0 : rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
3915 : : action, "Failed to allocate flow counter");
3916 : 0 : goto error;
3917 : : }
3918 : 0 : ret = mlx5_hws_cnt_pool_get_action_offset
3919 : : (priv->hws_cpool,
3920 : : cnt_id,
3921 : : &rule_acts[act_data->action_dst].action,
3922 [ # # # # : 0 : &rule_acts[act_data->action_dst].counter.offset,
# # # # #
# ]
3923 : : is_root
3924 : : );
3925 : : if (ret != 0)
3926 : : goto error;
3927 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3928 : 0 : flow->cnt_id = cnt_id;
3929 : 0 : break;
3930 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
3931 : 0 : ret = mlx5_hws_cnt_pool_get_action_offset
3932 : : (priv->hws_cpool,
3933 : : act_data->shared_counter.id,
3934 : : &rule_acts[act_data->action_dst].action,
3935 [ # # # # : 0 : &rule_acts[act_data->action_dst].counter.offset,
# # # # #
# ]
3936 : : is_root
3937 : : );
3938 : : if (ret != 0)
3939 : : goto error;
3940 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
3941 : 0 : flow->cnt_id = act_data->shared_counter.id;
3942 : 0 : break;
3943 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
3944 : 0 : ct_idx = MLX5_INDIRECT_ACTION_IDX_GET(action->conf);
3945 : 0 : if (flow_hw_ct_compile(dev, queue, ct_idx,
3946 : 0 : &rule_acts[act_data->action_dst]))
3947 : 0 : goto error;
3948 : : break;
3949 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
3950 : 0 : mtr_id = act_data->shared_meter.id &
3951 : : ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3952 : : /* Find ASO object. */
3953 : 0 : aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id);
3954 [ # # # # : 0 : if (!aso_mtr)
# # # # #
# ]
3955 : 0 : goto error;
3956 : 0 : rule_acts[act_data->action_dst].action =
3957 : 0 : pool->action;
3958 : 0 : rule_acts[act_data->action_dst].aso_meter.offset =
3959 : 0 : aso_mtr->offset;
3960 : 0 : break;
3961 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
3962 : : /*
3963 : : * Allocate meter directly will slow down flow
3964 : : * insertion rate.
3965 : : */
3966 : : ret = flow_hw_meter_mark_compile(dev,
3967 : 0 : act_data->action_dst, action,
3968 : : rule_acts, &mtr_idx, MLX5_HW_INV_QUEUE, error);
3969 : : if (ret != 0)
3970 : 0 : goto error;
3971 [ # # # # : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
# # # # #
# ]
3972 : : mlx5_flow_hw_aux_set_mtr_id(flow, aux, mtr_idx);
3973 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MTR_ID;
3974 : 0 : break;
3975 : 0 : case RTE_FLOW_ACTION_TYPE_NAT64:
3976 : 0 : nat64_c = action->conf;
3977 : 0 : rule_acts[act_data->action_dst].action = hw_acts->nat64[nat64_c->type];
3978 : 0 : break;
3979 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
3980 : 0 : jump_table = ((const struct rte_flow_action_jump_to_table_index *)
3981 : 0 : action->conf)->table;
3982 [ # # # # : 0 : if (likely(!rte_flow_template_table_resizable(dev->data->port_id,
# # # # #
# ]
3983 : : &table->cfg.attr))) {
3984 : 0 : rule_acts[act_data->action_dst].action =
3985 : 0 : jump_table->matcher_info[0].jump;
3986 : : } else {
3987 : : uint32_t selector;
3988 : 0 : rte_rwlock_read_lock(&table->matcher_replace_rwlk);
3989 : 0 : selector = table->matcher_selector;
3990 : 0 : rule_acts[act_data->action_dst].action =
3991 : 0 : jump_table->matcher_info[selector].jump;
3992 : 0 : rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
3993 : : }
3994 : 0 : rule_acts[act_data->action_dst].jump_to_matcher.offset =
3995 : : ((const struct rte_flow_action_jump_to_table_index *)
3996 : 0 : action->conf)->index;
3997 : 0 : break;
3998 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_MIRROR: {
3999 : 0 : const struct mlx5_mirror *mirror = action->conf;
4000 : :
4001 : 0 : rule_acts[act_data->action_dst].action = mirror->mirror_action;
4002 : 0 : break;
4003 : : }
4004 : : default:
4005 : : break;
4006 : : }
4007 : : }
4008 [ # # # # : 0 : if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) {
# # # # #
# ]
4009 : : /* If indirect count is used, then CNT_ID flag should be set. */
4010 : : MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
4011 [ # # # # : 0 : if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE) {
# # # # #
# ]
4012 : : /* If indirect AGE is used, then AGE_IDX flag should be set. */
4013 : : MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX);
4014 [ # # # # : 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
# # # # #
# ]
4015 : 0 : age_idx = mlx5_flow_hw_aux_get_age_idx(flow, aux) &
4016 : : MLX5_HWS_AGE_IDX_MASK;
4017 [ # # # # : 0 : if (mlx5_hws_cnt_age_get(priv->hws_cpool, flow->cnt_id) != age_idx)
# # # # #
# # # # #
# # # # #
# ]
4018 : : /*
4019 : : * This is first use of this indirect counter
4020 : : * for this indirect AGE, need to increase the
4021 : : * number of counters.
4022 : : */
4023 : : mlx5_hws_age_nb_cnt_increase(priv, age_idx);
4024 : : }
4025 : : /*
4026 : : * Update this indirect counter the indirect/direct AGE in which
4027 : : * using it.
4028 : : */
4029 [ # # # # : 0 : mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, age_idx);
# # # # #
# ]
4030 : : }
4031 [ # # # # : 0 : if (hw_acts->encap_decap && !hw_acts->encap_decap->shared) {
# # # # #
# # # # #
# # # # #
# ]
4032 [ # # # # : 0 : int ix = mlx5_multi_pattern_reformat_to_index(hw_acts->encap_decap->action_type);
# # # # #
# ]
4033 : 0 : struct mlx5dr_rule_action *ra = &rule_acts[hw_acts->encap_decap_pos];
4034 : :
4035 [ # # # # : 0 : if (ix < 0)
# # # # #
# ]
4036 : 0 : goto error;
4037 [ # # # # : 0 : if (!mp_segment)
# # # # #
# ]
4038 : 0 : mp_segment = mlx5_multi_pattern_segment_find(table, flow->res_idx);
4039 [ # # # # : 0 : if (!mp_segment || !mp_segment->reformat_action[ix])
# # # # #
# # # # #
# # # # #
# ]
4040 : 0 : goto error;
4041 : 0 : ra->action = mp_segment->reformat_action[ix];
4042 : : /* reformat offset is relative to selected DR action */
4043 : 0 : ra->reformat.offset = flow->res_idx - mp_segment->head_index;
4044 : 0 : ra->reformat.data = ap->encap_data;
4045 : : }
4046 [ # # # # : 0 : if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
# # # # #
# # # # #
# # # # #
# ]
4047 : 0 : rule_acts[hw_acts->push_remove_pos].ipv6_ext.offset =
4048 : 0 : flow->res_idx - 1;
4049 : 0 : rule_acts[hw_acts->push_remove_pos].ipv6_ext.header = ap->ipv6_push_data;
4050 : : }
4051 [ # # # # : 0 : if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) {
# # # # #
# ]
4052 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
4053 : 0 : flow->cnt_id = hw_acts->cnt_id;
4054 : : }
4055 : : return 0;
4056 : :
4057 : 0 : error:
4058 : 0 : flow_hw_release_actions(dev, queue, flow);
4059 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4060 : : NULL, "Action construction failed");
4061 : : }
4062 : :
4063 : : static const struct rte_flow_item *
4064 : 0 : flow_hw_get_rule_items(struct rte_eth_dev *dev,
4065 : : const struct rte_flow_template_table *table,
4066 : : const struct rte_flow_item items[],
4067 : : uint8_t pattern_template_index,
4068 : : struct mlx5_flow_hw_pattern_params *pp)
4069 : : {
4070 : 0 : struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
4071 : :
4072 : : /* Only one implicit item can be added to flow rule pattern. */
4073 : : MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
4074 : : /* At least one item was allocated in pattern params for items. */
4075 : : MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
4076 [ # # ]: 0 : if (pt->implicit_port) {
4077 [ # # ]: 0 : if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
4078 : 0 : rte_errno = ENOMEM;
4079 : 0 : return NULL;
4080 : : }
4081 : : /* Set up represented port item in pattern params. */
4082 : 0 : pp->port_spec = (struct rte_flow_item_ethdev){
4083 : 0 : .port_id = dev->data->port_id,
4084 : : };
4085 : 0 : pp->items[0] = (struct rte_flow_item){
4086 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
4087 : 0 : .spec = &pp->port_spec,
4088 : : };
4089 [ # # ]: 0 : rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
4090 : 0 : return pp->items;
4091 [ # # ]: 0 : } else if (pt->implicit_tag) {
4092 [ # # ]: 0 : if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
4093 : 0 : rte_errno = ENOMEM;
4094 : 0 : return NULL;
4095 : : }
4096 : : /* Set up tag item in pattern params. */
4097 : 0 : pp->tag_spec = (struct rte_flow_item_tag){
4098 : : .data = flow_hw_tx_tag_regc_value(dev),
4099 : : };
4100 : 0 : pp->items[0] = (struct rte_flow_item){
4101 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4102 : 0 : .spec = &pp->tag_spec,
4103 : : };
4104 : 0 : rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
4105 : 0 : return pp->items;
4106 : : } else {
4107 : : return items;
4108 : : }
4109 : : }
4110 : :
4111 : : /**
4112 : : * Enqueue HW steering flow creation.
4113 : : *
4114 : : * The flow will be applied to the HW only if the postpone bit is not set or
4115 : : * the extra push function is called.
4116 : : * The flow creation status should be checked from dequeue result.
4117 : : *
4118 : : * @param[in] dev
4119 : : * Pointer to the rte_eth_dev structure.
4120 : : * @param[in] queue
4121 : : * The queue to create the flow.
4122 : : * @param[in] attr
4123 : : * Pointer to the flow operation attributes.
4124 : : * @param[in] table
4125 : : * Pointer to the template table.
4126 : : * @param[in] insertion_type
4127 : : * Insertion type for flow rules.
4128 : : * @param[in] rule_index
4129 : : * The item pattern flow follows from the table.
4130 : : * @param[in] items
4131 : : * Items with flow spec value.
4132 : : * @param[in] pattern_template_index
4133 : : * The item pattern flow follows from the table.
4134 : : * @param[in] actions
4135 : : * Action with flow spec value.
4136 : : * @param[in] action_template_index
4137 : : * The action pattern flow follows from the table.
4138 : : * @param[in] user_data
4139 : : * Pointer to the user_data.
4140 : : * @param[out] error
4141 : : * Pointer to error structure.
4142 : : *
4143 : : * @return
4144 : : * Flow pointer on success, NULL otherwise and rte_errno is set.
4145 : : */
4146 : : static __rte_always_inline struct rte_flow *
4147 : : flow_hw_async_flow_create_generic(struct rte_eth_dev *dev,
4148 : : uint32_t queue,
4149 : : const struct rte_flow_op_attr *attr,
4150 : : struct rte_flow_template_table *table,
4151 : : enum rte_flow_table_insertion_type insertion_type,
4152 : : uint32_t rule_index,
4153 : : const struct rte_flow_item items[],
4154 : : uint8_t pattern_template_index,
4155 : : const struct rte_flow_action actions[],
4156 : : uint8_t action_template_index,
4157 : : void *user_data,
4158 : : struct rte_flow_error *error)
4159 : : {
4160 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4161 : 0 : struct mlx5dr_rule_attr rule_attr = {
4162 : : .queue_id = queue,
4163 : : .user_data = user_data,
4164 : 0 : .burst = attr->postpone,
4165 : : };
4166 : : struct mlx5dr_rule_action *rule_acts;
4167 : : struct rte_flow_hw *flow = NULL;
4168 : : const struct rte_flow_item *rule_items;
4169 : 0 : struct rte_flow_error sub_error = { 0 };
4170 : 0 : uint32_t flow_idx = 0;
4171 : 0 : uint32_t res_idx = 0;
4172 : : int ret;
4173 : :
4174 : 0 : if (mlx5_fp_debug_enabled()) {
4175 [ # # # # : 0 : if (flow_hw_async_create_validate(dev, queue, table, insertion_type, rule_index,
# # ]
4176 : : items, pattern_template_index, actions, action_template_index, error))
4177 : : return NULL;
4178 : : }
4179 : 0 : flow = mlx5_ipool_malloc(table->flow_pool, &flow_idx);
4180 [ # # # # : 0 : if (!flow) {
# # ]
4181 : 0 : rte_errno = ENOMEM;
4182 : 0 : goto error;
4183 : : }
4184 : 0 : flow->nt_rule = false;
4185 : : rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
4186 : : /*
4187 : : * Set the table here in order to know the destination table
4188 : : * when free the flow afterward.
4189 : : */
4190 : 0 : flow->table = table;
4191 : 0 : flow->mt_idx = pattern_template_index;
4192 : 0 : flow->idx = flow_idx;
4193 [ # # # # : 0 : if (table->resource) {
# # ]
4194 : 0 : mlx5_ipool_malloc(table->resource, &res_idx);
4195 [ # # # # : 0 : if (!res_idx) {
# # ]
4196 : 0 : rte_errno = ENOMEM;
4197 : 0 : goto error;
4198 : : }
4199 : 0 : flow->res_idx = res_idx;
4200 : : } else {
4201 : 0 : flow->res_idx = flow_idx;
4202 : : }
4203 : 0 : flow->flags = 0;
4204 : : /*
4205 : : * Set the flow operation type here in order to know if the flow memory
4206 : : * should be freed or not when get the result from dequeue.
4207 : : */
4208 : 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4209 : 0 : flow->user_data = user_data;
4210 : 0 : rule_attr.user_data = flow;
4211 : : /*
4212 : : * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
4213 : : * for rule insertion hints.
4214 : : */
4215 [ # # # # ]: 0 : flow->rule_idx = (rule_index == UINT32_MAX) ? flow->res_idx - 1 : rule_index;
4216 : 0 : rule_attr.rule_idx = flow->rule_idx;
4217 : : /*
4218 : : * Construct the flow actions based on the input actions.
4219 : : * The implicitly appended action is always fixed, like metadata
4220 : : * copy action from FDB to NIC Rx.
4221 : : * No need to copy and contrust a new "actions" list based on the
4222 : : * user's input, in order to save the cost.
4223 : : */
4224 [ # # # # : 0 : if (flow_hw_actions_construct(dev, flow, &priv->hw_q[queue].ap,
# # ]
4225 : 0 : &table->ats[action_template_index],
4226 [ # # # # : 0 : table->its[pattern_template_index]->item_flags,
# # ]
4227 : : flow->table, actions,
4228 : : rule_acts, queue, &sub_error))
4229 : 0 : goto error;
4230 : : if (insertion_type == RTE_FLOW_TABLE_INSERTION_TYPE_INDEX) {
4231 : : rule_items = items;
4232 : : } else {
4233 : 0 : rule_items = flow_hw_get_rule_items(dev, table, items,
4234 : 0 : pattern_template_index, &priv->hw_q[queue].pp);
4235 [ # # # # ]: 0 : if (!rule_items)
4236 : 0 : goto error;
4237 : : }
4238 [ # # # # : 0 : if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
# # ]
4239 : 0 : ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
4240 : : pattern_template_index, rule_items,
4241 : : action_template_index, rule_acts,
4242 : : &rule_attr,
4243 : 0 : (struct mlx5dr_rule *)flow->rule);
4244 : : } else {
4245 [ # # # # : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
# # ]
4246 : : uint32_t selector;
4247 : :
4248 : 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE;
4249 : 0 : rte_rwlock_read_lock(&table->matcher_replace_rwlk);
4250 : 0 : selector = table->matcher_selector;
4251 : 0 : ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
4252 : : pattern_template_index, rule_items,
4253 : : action_template_index, rule_acts,
4254 : : &rule_attr,
4255 : 0 : (struct mlx5dr_rule *)flow->rule);
4256 : 0 : rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
4257 : 0 : aux->matcher_selector = selector;
4258 : 0 : flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR;
4259 : : }
4260 : :
4261 [ # # # # : 0 : if (likely(!ret)) {
# # ]
4262 : 0 : flow_hw_q_inc_flow_ops(priv, queue);
4263 : 0 : return (struct rte_flow *)flow;
4264 : : }
4265 : 0 : error:
4266 [ # # # # : 0 : if (table->resource && res_idx)
# # # # #
# # # ]
4267 : 0 : mlx5_ipool_free(table->resource, res_idx);
4268 [ # # # # : 0 : if (flow_idx)
# # ]
4269 : 0 : mlx5_ipool_free(table->flow_pool, flow_idx);
4270 [ # # # # : 0 : if (sub_error.cause != RTE_FLOW_ERROR_TYPE_NONE && error != NULL)
# # # # #
# # # ]
4271 : 0 : *error = sub_error;
4272 : : else
4273 : 0 : rte_flow_error_set(error, rte_errno,
4274 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4275 : : "fail to create rte flow");
4276 : : return NULL;
4277 : : }
4278 : :
4279 : : static struct rte_flow *
4280 : 0 : flow_hw_async_flow_create(struct rte_eth_dev *dev,
4281 : : uint32_t queue,
4282 : : const struct rte_flow_op_attr *attr,
4283 : : struct rte_flow_template_table *table,
4284 : : const struct rte_flow_item items[],
4285 : : uint8_t pattern_template_index,
4286 : : const struct rte_flow_action actions[],
4287 : : uint8_t action_template_index,
4288 : : void *user_data,
4289 : : struct rte_flow_error *error)
4290 : : {
4291 : : uint32_t rule_index = UINT32_MAX;
4292 : :
4293 [ # # ]: 0 : return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4294 : : RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN, rule_index,
4295 : : items, pattern_template_index, actions, action_template_index,
4296 : : user_data, error);
4297 : : }
4298 : :
4299 : : static struct rte_flow *
4300 : 0 : flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
4301 : : uint32_t queue,
4302 : : const struct rte_flow_op_attr *attr,
4303 : : struct rte_flow_template_table *table,
4304 : : uint32_t rule_index,
4305 : : const struct rte_flow_action actions[],
4306 : : uint8_t action_template_index,
4307 : : void *user_data,
4308 : : struct rte_flow_error *error)
4309 : : {
4310 : 0 : struct rte_flow_item items[] = {{.type = RTE_FLOW_ITEM_TYPE_END,}};
4311 : : uint8_t pattern_template_index = 0;
4312 : :
4313 [ # # ]: 0 : return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4314 : : RTE_FLOW_TABLE_INSERTION_TYPE_INDEX, rule_index,
4315 : : items, pattern_template_index, actions, action_template_index,
4316 : : user_data, error);
4317 : : }
4318 : :
4319 : : static struct rte_flow *
4320 : 0 : flow_hw_async_flow_create_by_index_with_pattern(struct rte_eth_dev *dev,
4321 : : uint32_t queue,
4322 : : const struct rte_flow_op_attr *attr,
4323 : : struct rte_flow_template_table *table,
4324 : : uint32_t rule_index,
4325 : : const struct rte_flow_item items[],
4326 : : uint8_t pattern_template_index,
4327 : : const struct rte_flow_action actions[],
4328 : : uint8_t action_template_index,
4329 : : void *user_data,
4330 : : struct rte_flow_error *error)
4331 : : {
4332 [ # # ]: 0 : return flow_hw_async_flow_create_generic(dev, queue, attr, table,
4333 : : RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN, rule_index,
4334 : : items, pattern_template_index, actions, action_template_index,
4335 : : user_data, error);
4336 : : }
4337 : :
4338 : : /**
4339 : : * Enqueue HW steering flow update.
4340 : : *
4341 : : * The flow will be applied to the HW only if the postpone bit is not set or
4342 : : * the extra push function is called.
4343 : : * The flow destruction status should be checked from dequeue result.
4344 : : *
4345 : : * @param[in] dev
4346 : : * Pointer to the rte_eth_dev structure.
4347 : : * @param[in] queue
4348 : : * The queue to destroy the flow.
4349 : : * @param[in] attr
4350 : : * Pointer to the flow operation attributes.
4351 : : * @param[in] flow
4352 : : * Pointer to the flow to be destroyed.
4353 : : * @param[in] actions
4354 : : * Action with flow spec value.
4355 : : * @param[in] action_template_index
4356 : : * The action pattern flow follows from the table.
4357 : : * @param[in] user_data
4358 : : * Pointer to the user_data.
4359 : : * @param[out] error
4360 : : * Pointer to error structure.
4361 : : *
4362 : : * @return
4363 : : * 0 on success, negative value otherwise and rte_errno is set.
4364 : : */
4365 : : static int
4366 : 0 : flow_hw_async_flow_update(struct rte_eth_dev *dev,
4367 : : uint32_t queue,
4368 : : const struct rte_flow_op_attr *attr,
4369 : : struct rte_flow *flow,
4370 : : const struct rte_flow_action actions[],
4371 : : uint8_t action_template_index,
4372 : : void *user_data,
4373 : : struct rte_flow_error *error)
4374 : : {
4375 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4376 : 0 : struct mlx5dr_rule_attr rule_attr = {
4377 : : .queue_id = queue,
4378 : : .user_data = user_data,
4379 : 0 : .burst = attr->postpone,
4380 : : };
4381 : : struct mlx5dr_rule_action *rule_acts;
4382 : : struct rte_flow_hw *of = (struct rte_flow_hw *)flow;
4383 : : struct rte_flow_hw *nf;
4384 : : struct rte_flow_hw_aux *aux;
4385 : 0 : struct rte_flow_template_table *table = of->table;
4386 : 0 : uint32_t res_idx = 0;
4387 : : int ret;
4388 : :
4389 [ # # ]: 0 : if (mlx5_fp_debug_enabled()) {
4390 [ # # ]: 0 : if (flow_hw_async_update_validate(dev, queue, of, actions, action_template_index,
4391 : : error))
4392 : 0 : return -rte_errno;
4393 : : }
4394 [ # # ]: 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, of);
4395 : 0 : nf = &aux->upd_flow;
4396 : 0 : nf->nt_rule = false;
4397 : 0 : rule_acts = flow_hw_get_dr_action_buffer(priv, table, action_template_index, queue);
4398 : : /*
4399 : : * Set the table here in order to know the destination table
4400 : : * when free the flow afterwards.
4401 : : */
4402 : 0 : nf->table = table;
4403 : 0 : nf->mt_idx = of->mt_idx;
4404 : 0 : nf->idx = of->idx;
4405 [ # # ]: 0 : if (table->resource) {
4406 : 0 : mlx5_ipool_malloc(table->resource, &res_idx);
4407 [ # # ]: 0 : if (!res_idx) {
4408 : 0 : rte_errno = ENOMEM;
4409 : 0 : goto error;
4410 : : }
4411 : 0 : nf->res_idx = res_idx;
4412 : : } else {
4413 : 0 : nf->res_idx = of->res_idx;
4414 : : }
4415 : 0 : nf->flags = 0;
4416 : : /* Indicate the construction function to set the proper fields. */
4417 : 0 : nf->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
4418 : : /*
4419 : : * Indexed pool returns 1-based indices, but mlx5dr expects 0-based indices
4420 : : * for rule insertion hints.
4421 : : * If there is only one STE, the update will be atomic by nature.
4422 : : */
4423 : 0 : nf->rule_idx = nf->res_idx - 1;
4424 : 0 : rule_attr.rule_idx = nf->rule_idx;
4425 : : /*
4426 : : * Construct the flow actions based on the input actions.
4427 : : * The implicitly appended action is always fixed, like metadata
4428 : : * copy action from FDB to NIC Rx.
4429 : : * No need to copy and contrust a new "actions" list based on the
4430 : : * user's input, in order to save the cost.
4431 : : */
4432 [ # # ]: 0 : if (flow_hw_actions_construct(dev, nf, &priv->hw_q[queue].ap,
4433 : 0 : &table->ats[action_template_index],
4434 [ # # ]: 0 : table->its[nf->mt_idx]->item_flags,
4435 : : table, actions,
4436 : : rule_acts, queue, error)) {
4437 : 0 : rte_errno = EINVAL;
4438 : 0 : goto error;
4439 : : }
4440 : : /*
4441 : : * Set the flow operation type here in order to know if the flow memory
4442 : : * should be freed or not when get the result from dequeue.
4443 : : */
4444 : 0 : of->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE;
4445 : 0 : of->user_data = user_data;
4446 : 0 : of->flags |= MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW;
4447 : 0 : rule_attr.user_data = of;
4448 : 0 : ret = mlx5dr_rule_action_update((struct mlx5dr_rule *)of->rule,
4449 : : action_template_index, rule_acts, &rule_attr);
4450 [ # # ]: 0 : if (likely(!ret)) {
4451 : 0 : flow_hw_q_inc_flow_ops(priv, queue);
4452 : 0 : return 0;
4453 : : }
4454 : 0 : error:
4455 [ # # # # ]: 0 : if (table->resource && res_idx)
4456 : 0 : mlx5_ipool_free(table->resource, res_idx);
4457 : 0 : return rte_flow_error_set(error, rte_errno,
4458 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4459 : : "fail to update rte flow");
4460 : : }
4461 : :
4462 : : /**
4463 : : * Enqueue HW steering flow destruction.
4464 : : *
4465 : : * The flow will be applied to the HW only if the postpone bit is not set or
4466 : : * the extra push function is called.
4467 : : * The flow destruction status should be checked from dequeue result.
4468 : : *
4469 : : * @param[in] dev
4470 : : * Pointer to the rte_eth_dev structure.
4471 : : * @param[in] queue
4472 : : * The queue to destroy the flow.
4473 : : * @param[in] attr
4474 : : * Pointer to the flow operation attributes.
4475 : : * @param[in] flow
4476 : : * Pointer to the flow to be destroyed.
4477 : : * @param[in] user_data
4478 : : * Pointer to the user_data.
4479 : : * @param[out] error
4480 : : * Pointer to error structure.
4481 : : *
4482 : : * @return
4483 : : * 0 on success, negative value otherwise and rte_errno is set.
4484 : : */
4485 : : static int
4486 : 0 : flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
4487 : : uint32_t queue,
4488 : : const struct rte_flow_op_attr *attr,
4489 : : struct rte_flow *flow,
4490 : : void *user_data,
4491 : : struct rte_flow_error *error)
4492 : : {
4493 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4494 : 0 : struct mlx5dr_rule_attr rule_attr = {
4495 : : .queue_id = queue,
4496 : : .user_data = user_data,
4497 : 0 : .burst = attr->postpone,
4498 : : };
4499 : : struct rte_flow_hw *fh = (struct rte_flow_hw *)flow;
4500 : 0 : bool resizable = rte_flow_template_table_resizable(dev->data->port_id,
4501 : 0 : &fh->table->cfg.attr);
4502 : : int ret;
4503 : :
4504 : : if (mlx5_fp_debug_enabled()) {
4505 : : if (flow_hw_async_destroy_validate(dev, queue, fh, error))
4506 : : return -rte_errno;
4507 : : }
4508 [ # # ]: 0 : fh->operation_type = !resizable ?
4509 : : MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY :
4510 : : MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY;
4511 : 0 : fh->user_data = user_data;
4512 : 0 : rule_attr.user_data = fh;
4513 : 0 : rule_attr.rule_idx = fh->rule_idx;
4514 : 0 : ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
4515 [ # # ]: 0 : if (ret) {
4516 : 0 : return rte_flow_error_set(error, rte_errno,
4517 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4518 : : "fail to destroy rte flow");
4519 : : }
4520 : : flow_hw_q_inc_flow_ops(priv, queue);
4521 : 0 : return 0;
4522 : : }
4523 : :
4524 : : /**
4525 : : * Release the AGE and counter for given flow.
4526 : : *
4527 : : * @param[in] priv
4528 : : * Pointer to the port private data structure.
4529 : : * @param[in] queue
4530 : : * The queue to release the counter.
4531 : : * @param[in, out] flow
4532 : : * Pointer to the flow containing the counter.
4533 : : * @param[out] error
4534 : : * Pointer to error structure.
4535 : : */
4536 : : static void
4537 : 0 : flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
4538 : : struct rte_flow_hw *flow,
4539 : : struct rte_flow_error *error)
4540 : : {
4541 [ # # ]: 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(priv->dev_data->port_id, flow);
4542 : : uint32_t *cnt_queue;
4543 : 0 : uint32_t age_idx = aux->orig.age_idx;
4544 : :
4545 : : MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID);
4546 [ # # # # ]: 0 : if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) {
4547 [ # # # # ]: 0 : if ((flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) &&
4548 : : !mlx5_hws_age_is_indirect(age_idx)) {
4549 : : /* Remove this AGE parameter from indirect counter. */
4550 [ # # ]: 0 : mlx5_hws_cnt_age_set(priv->hws_cpool, flow->cnt_id, 0);
4551 : : /* Release the AGE parameter. */
4552 : 0 : mlx5_hws_age_action_destroy(priv, age_idx, error);
4553 : : }
4554 : 0 : return;
4555 : : }
4556 : : cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
4557 : : /* Put the counter first to reduce the race risk in BG thread. */
4558 [ # # ]: 0 : mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id);
4559 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX) {
4560 [ # # ]: 0 : if (mlx5_hws_age_is_indirect(age_idx)) {
4561 : 0 : uint32_t idx = age_idx & MLX5_HWS_AGE_IDX_MASK;
4562 : :
4563 : : mlx5_hws_age_nb_cnt_decrease(priv, idx);
4564 : : } else {
4565 : : /* Release the AGE parameter. */
4566 : 0 : mlx5_hws_age_action_destroy(priv, age_idx, error);
4567 : : }
4568 : : }
4569 : : }
4570 : :
4571 : : static __rte_always_inline void
4572 : : flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job *job,
4573 : : uint32_t queue)
4574 : : {
4575 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4576 : : struct mlx5_aso_ct_action *aso_ct;
4577 : : struct mlx5_aso_mtr *aso_mtr;
4578 : : uint32_t type, idx;
4579 : :
4580 [ # # ]: 0 : if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
4581 : : MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
4582 : 0 : mlx5_quota_async_completion(dev, queue, job);
4583 [ # # ]: 0 : } else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
4584 : : type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4585 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4586 : 0 : idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4587 : 0 : mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
4588 : : }
4589 [ # # ]: 0 : } else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
4590 : : type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4591 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
4592 : 0 : idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4593 : 0 : aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
4594 : 0 : aso_mtr->state = ASO_METER_READY;
4595 [ # # ]: 0 : } else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4596 : 0 : idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4597 : 0 : aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4598 : 0 : aso_ct->state = ASO_CONNTRACK_READY;
4599 : : }
4600 [ # # ]: 0 : } else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
4601 : : type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
4602 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
4603 : 0 : idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
4604 : 0 : aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
4605 : 0 : mlx5_aso_ct_obj_analyze(job->query.user,
4606 : 0 : job->query.hw);
4607 : 0 : aso_ct->state = ASO_CONNTRACK_READY;
4608 : : }
4609 : : }
4610 : : }
4611 : :
4612 : : static __rte_always_inline int
4613 : : mlx5_hw_pull_flow_transfer_comp(struct rte_eth_dev *dev,
4614 : : uint32_t queue, struct rte_flow_op_result res[],
4615 : : uint16_t n_res)
4616 : : {
4617 : : uint32_t size, i;
4618 : 0 : struct rte_flow_hw *flow = NULL;
4619 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4620 : 0 : struct rte_ring *ring = priv->hw_q[queue].flow_transfer_completed;
4621 : :
4622 : 0 : if (ring == NULL)
4623 : : return 0;
4624 : :
4625 : 0 : size = RTE_MIN(rte_ring_count(ring), n_res);
4626 [ # # ]: 0 : for (i = 0; i < size; i++) {
4627 [ # # # # : 0 : res[i].status = RTE_FLOW_OP_SUCCESS;
# ]
4628 : : rte_ring_dequeue(ring, (void **)&flow);
4629 : 0 : res[i].user_data = flow->user_data;
4630 : : flow_hw_q_dec_flow_ops(priv, queue);
4631 : : }
4632 : 0 : return (int)size;
4633 : : }
4634 : :
4635 : : static inline int
4636 : 0 : __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
4637 : : uint32_t queue,
4638 : : struct rte_flow_op_result res[],
4639 : : uint16_t n_res)
4640 : :
4641 : : {
4642 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4643 : 0 : struct rte_ring *r = priv->hw_q[queue].indir_cq;
4644 : 0 : void *user_data = NULL;
4645 : : int ret_comp, i;
4646 : :
4647 : 0 : ret_comp = (int)rte_ring_count(r);
4648 : 0 : if (ret_comp > n_res)
4649 : : ret_comp = n_res;
4650 [ # # ]: 0 : for (i = 0; i < ret_comp; i++) {
4651 : : rte_ring_dequeue(r, &user_data);
4652 : 0 : res[i].user_data = user_data;
4653 : 0 : res[i].status = RTE_FLOW_OP_SUCCESS;
4654 : : }
4655 [ # # ]: 0 : if (!priv->shared_host) {
4656 [ # # # # ]: 0 : if (ret_comp < n_res && priv->hws_mpool)
4657 : 0 : ret_comp += mlx5_aso_pull_completion(&priv->hws_mpool->sq[queue],
4658 : 0 : &res[ret_comp], n_res - ret_comp);
4659 [ # # # # ]: 0 : if (ret_comp < n_res && priv->hws_ctpool)
4660 : 0 : ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
4661 : 0 : &res[ret_comp], n_res - ret_comp);
4662 : : }
4663 [ # # # # ]: 0 : if (ret_comp < n_res && priv->quota_ctx.sq)
4664 : 0 : ret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],
4665 : 0 : &res[ret_comp],
4666 : 0 : n_res - ret_comp);
4667 [ # # ]: 0 : for (i = 0; i < ret_comp; i++) {
4668 : 0 : struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)res[i].user_data;
4669 : :
4670 : : /* Restore user data. */
4671 : 0 : res[i].user_data = job->user_data;
4672 [ # # ]: 0 : if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
4673 : : flow_hw_pull_legacy_indirect_comp(dev, job, queue);
4674 : : /*
4675 : : * Current PMD supports 2 indirect action list types - MIRROR and REFORMAT.
4676 : : * These indirect list types do not post WQE to create action.
4677 : : * Future indirect list types that do post WQE will add
4678 : : * completion handlers here.
4679 : : */
4680 : : flow_hw_job_put(priv, job, queue);
4681 : : }
4682 : 0 : return ret_comp;
4683 : : }
4684 : :
4685 : : static __rte_always_inline void
4686 : : hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
4687 : : struct rte_flow_hw *flow,
4688 : : uint32_t queue, struct rte_flow_error *error)
4689 : : {
4690 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4691 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
4692 : 0 : struct rte_flow_template_table *table = flow->table;
4693 : : /* Release the original resource index in case of update. */
4694 : 0 : uint32_t res_idx = flow->res_idx;
4695 : :
4696 [ # # ]: 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAGS_ALL) {
4697 [ # # # # : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
# # ]
4698 : :
4699 [ # # # # : 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP)
# # ]
4700 : 0 : flow_hw_jump_release(dev, flow->jump);
4701 [ # # # # : 0 : else if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ)
# # ]
4702 : 0 : mlx5_hrxq_obj_release(dev, flow->hrxq);
4703 [ # # # # : 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID)
# # ]
4704 : 0 : flow_hw_age_count_release(priv, queue, flow, error);
4705 [ # # # # : 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MTR_ID)
# # ]
4706 : 0 : mlx5_ipool_free(pool->idx_pool, aux->orig.mtr_id);
4707 [ # # # # : 0 : if (flow->flags & MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW) {
# # ]
4708 [ # # # # : 0 : struct rte_flow_hw *upd_flow = &aux->upd_flow;
# # ]
4709 : :
4710 : : rte_memcpy(flow, upd_flow, offsetof(struct rte_flow_hw, rule));
4711 : 0 : aux->orig = aux->upd;
4712 : 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE;
4713 [ # # # # : 0 : if (!flow->nt_rule && table->resource)
# # # # #
# # # ]
4714 : 0 : mlx5_ipool_free(table->resource, res_idx);
4715 : : }
4716 : : }
4717 [ # # # # : 0 : if (flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY ||
# # ]
4718 : : flow->operation_type == MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY) {
4719 [ # # # # : 0 : if (!flow->nt_rule) {
# # ]
4720 [ # # # # : 0 : if (table->resource)
# # ]
4721 : 0 : mlx5_ipool_free(table->resource, res_idx);
4722 [ # # # # : 0 : if (table->flow_pool)
# # ]
4723 : 0 : mlx5_ipool_free(table->flow_pool, flow->idx);
4724 : : }
4725 : : }
4726 : : }
4727 : :
4728 : : static __rte_always_inline void
4729 : : hw_cmpl_resizable_tbl(struct rte_eth_dev *dev,
4730 : : struct rte_flow_hw *flow,
4731 : : uint32_t queue, enum rte_flow_op_status status,
4732 : : struct rte_flow_error *error)
4733 : : {
4734 : 0 : struct rte_flow_template_table *table = flow->table;
4735 : 0 : struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, flow);
4736 : 0 : uint32_t selector = aux->matcher_selector;
4737 : 0 : uint32_t other_selector = (selector + 1) & 1;
4738 : :
4739 : : MLX5_ASSERT(flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
4740 [ # # # # ]: 0 : switch (flow->operation_type) {
4741 : 0 : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4742 : 0 : rte_atomic_fetch_add_explicit
4743 : : (&table->matcher_info[selector].refcnt, 1,
4744 : : rte_memory_order_relaxed);
4745 : 0 : break;
4746 : 0 : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4747 [ # # ]: 0 : rte_atomic_fetch_sub_explicit
4748 : : (&table->matcher_info[selector].refcnt, 1,
4749 : : rte_memory_order_relaxed);
4750 : : hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4751 : : break;
4752 : 0 : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4753 [ # # ]: 0 : if (status == RTE_FLOW_OP_SUCCESS) {
4754 : 0 : rte_atomic_fetch_sub_explicit
4755 : : (&table->matcher_info[selector].refcnt, 1,
4756 : : rte_memory_order_relaxed);
4757 : 0 : rte_atomic_fetch_add_explicit
4758 : : (&table->matcher_info[other_selector].refcnt, 1,
4759 : : rte_memory_order_relaxed);
4760 : 0 : aux->matcher_selector = other_selector;
4761 : : }
4762 : : break;
4763 : : default:
4764 : : break;
4765 : : }
4766 : : }
4767 : :
4768 : : /**
4769 : : * Pull the enqueued flows.
4770 : : *
4771 : : * For flows enqueued from creation/destruction, the status should be
4772 : : * checked from the dequeue result.
4773 : : *
4774 : : * @param[in] dev
4775 : : * Pointer to the rte_eth_dev structure.
4776 : : * @param[in] queue
4777 : : * The queue to pull the result.
4778 : : * @param[in/out] res
4779 : : * Array to save the results.
4780 : : * @param[in] n_res
4781 : :  * Number of available result entries in the array.
4782 : : * @param[out] error
4783 : : * Pointer to error structure.
4784 : : *
4785 : : * @return
4786 : : * Result number on success, negative value otherwise and rte_errno is set.
4787 : : */
4788 : : static int
4789 : 0 : flow_hw_pull(struct rte_eth_dev *dev,
4790 : : uint32_t queue,
4791 : : struct rte_flow_op_result res[],
4792 : : uint16_t n_res,
4793 : : struct rte_flow_error *error)
4794 : : {
4795 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4796 : : int ret, i;
4797 : :
4798 : : /* 1. Pull the flow completion. */
4799 : 0 : ret = mlx5dr_send_queue_poll(priv->dr_ctx, queue, res, n_res);
4800 [ # # ]: 0 : if (ret < 0)
4801 : 0 : return rte_flow_error_set(error, rte_errno,
4802 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4803 : : "fail to query flow queue");
4804 [ # # ]: 0 : for (i = 0; i < ret; i++) {
: : /* The completion's user_data is the internal flow handle. */
4805 : 0 : struct rte_flow_hw *flow = res[i].user_data;
4806 : :
4807 : : /* Restore user data. */
4808 : 0 : res[i].user_data = flow->user_data;
4809 [ # # # ]: 0 : switch (flow->operation_type) {
4810 : : case MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY:
4811 : : case MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE:
4812 : : hw_cmpl_flow_update_or_destroy(dev, flow, queue, error);
4813 : : break;
4814 : 0 : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE:
4815 : : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY:
4816 : : case MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE:
4817 [ # # ]: 0 : hw_cmpl_resizable_tbl(dev, flow, queue, res[i].status, error);
4818 : : break;
4819 : : default:
4820 : : break;
4821 : : }
4822 : : flow_hw_q_dec_flow_ops(priv, queue);
4823 : : }
4824 : : /* 2. Pull indirect action comp. */
: : /* Fill any remaining room in res[] with indirect-action completions,
: :  * then with flow-transfer completions. */
4825 [ # # ]: 0 : if (ret < n_res)
4826 : 0 : ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
4827 : 0 : n_res - ret);
4828 [ # # ]: 0 : if (ret < n_res)
4829 : 0 : ret += mlx5_hw_pull_flow_transfer_comp(dev, queue, &res[ret],
4830 [ # # ]: 0 : n_res - ret);
4831 : :
4832 : : return ret;
4833 : : }
4834 : :
4835 : : static uint32_t
: : /*
: :  * Move every pending job from @pending_q to @cmpl_q and return the
: :  * number of jobs moved. Return values of rte_ring_dequeue/enqueue are
: :  * not checked; presumably cmpl_q is sized to always hold all jobs
: :  * taken from pending_q — NOTE(review): confirm ring sizing guarantees.
: :  */
4836 : 0 : mlx5_hw_push_queue(struct rte_ring *pending_q, struct rte_ring *cmpl_q)
4837 : : {
4838 : 0 : void *job = NULL;
4839 : : uint32_t i, size = rte_ring_count(pending_q);
4840 : :
4841 [ # # ]: 0 : for (i = 0; i < size; i++) {
4842 : : rte_ring_dequeue(pending_q, &job);
4843 [ # # # # : 0 : rte_ring_enqueue(cmpl_q, job);
# ]
4844 : : }
4845 : 0 : return size;
4846 : : }
4847 : :
4848 : : static inline uint32_t
: : /*
: :  * Flush host-side pending asynchronous jobs for one queue:
: :  * - move indirect-action jobs (and, when those rings were allocated,
: :  *   flow-transfer jobs) from their pending rings to completion rings;
: :  * - when this port is not sharing a host context, push pending ASO
: :  *   WQEs for the CT pool and the meter-mark pool of this queue.
: :  * Returns the number of operations still pending on the queue.
: :  */
4849 : 0 : __flow_hw_push_action(struct rte_eth_dev *dev,
4850 : : uint32_t queue)
4851 : : {
4852 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4853 : 0 : struct mlx5_hw_q *hw_q = &priv->hw_q[queue];
4854 : :
4855 : 0 : mlx5_hw_push_queue(hw_q->indir_iq, hw_q->indir_cq);
4856 [ # # # # ]: 0 : if (hw_q->flow_transfer_pending != NULL && hw_q->flow_transfer_completed != NULL)
4857 : 0 : mlx5_hw_push_queue(hw_q->flow_transfer_pending,
4858 : : hw_q->flow_transfer_completed);
4859 [ # # ]: 0 : if (!priv->shared_host) {
4860 [ # # ]: 0 : if (priv->hws_ctpool)
4861 : 0 : mlx5_aso_push_wqe(priv->sh,
4862 : 0 : &priv->ct_mng->aso_sqs[queue]);
4863 [ # # ]: 0 : if (priv->hws_mpool)
4864 : 0 : mlx5_aso_push_wqe(priv->sh,
4865 : 0 : &priv->hws_mpool->sq[queue]);
4866 : : }
4867 : 0 : return flow_hw_q_pending(priv, queue);
4868 : : }
4869 : :
4870 : : static int
: : /*
: :  * Push all enqueued jobs of @queue and request an asynchronous drain
: :  * of the mlx5dr send queue.
: :  * Returns the number of operations still pending on success, or a
: :  * negative value (with @error set) on failure.
: :  */
4871 : 0 : __flow_hw_push(struct rte_eth_dev *dev,
4872 : : uint32_t queue,
4873 : : struct rte_flow_error *error)
4874 : : {
4875 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4876 : : int ret, num;
4877 : :
4878 : 0 : num = __flow_hw_push_action(dev, queue);
4879 : 0 : ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
4880 : : MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
4881 [ # # ]: 0 : if (ret) {
4882 : 0 : rte_flow_error_set(error, rte_errno,
4883 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4884 : : "fail to push flows");
4885 : 0 : return ret;
4886 : : }
4887 : : return num;
4888 : : }
4889 : :
4890 : : /**
4891 : : * Push the enqueued flows to HW.
4892 : : *
4893 : : * Force apply all the enqueued flows to the HW.
4894 : : *
4895 : : * @param[in] dev
4896 : : * Pointer to the rte_eth_dev structure.
4897 : : * @param[in] queue
4898 : : * The queue to push the flow.
4899 : : * @param[out] error
4900 : : * Pointer to error structure.
4901 : : *
4902 : : * @return
4903 : : * 0 on success, negative value otherwise and rte_errno is set.
4904 : : */
4905 : : static int
4906 : 0 : flow_hw_push(struct rte_eth_dev *dev,
4907 : : uint32_t queue, struct rte_flow_error *error)
4908 : : {
4909 : 0 : int ret = __flow_hw_push(dev, queue, error);
4910 : :
: : /* __flow_hw_push() returns the pending-op count on success; the API
: :  * contract here is 0 on success, so discard the positive count. */
4911 : 0 : return ret >= 0 ? 0 : ret;
4912 : : }
4913 : :
4914 : : /**
4915 : : * Drain the enqueued flows' completion.
4916 : : *
4917 : : * @param[in] dev
4918 : : * Pointer to the rte_eth_dev structure.
4919 : : * @param[in] queue
4920 : : * The queue to pull the flow.
4921 : : * @param[out] error
4922 : : * Pointer to error structure.
4923 : : *
4924 : : * @return
4925 : : * 0 on success, negative value otherwise and rte_errno is set.
4926 : : */
4927 : : static int
4928 : 0 : __flow_hw_pull_comp(struct rte_eth_dev *dev,
4929 : : uint32_t queue, struct rte_flow_error *error)
4930 : : {
4931 : : struct rte_flow_op_result comp[BURST_THR];
4932 : : int ret, i, empty_loop = 0;
4933 : : uint32_t pending_rules;
4934 : :
: : /* Push first; a non-negative return is the number of pending ops
: :  * whose completions must be drained below. */
4935 : 0 : ret = __flow_hw_push(dev, queue, error);
4936 [ # # ]: 0 : if (ret < 0)
4937 : : return ret;
4938 : 0 : pending_rules = ret;
4939 [ # # ]: 0 : while (pending_rules) {
4940 : 0 : ret = flow_hw_pull(dev, queue, comp, BURST_THR, error);
4941 [ # # ]: 0 : if (ret < 0)
4942 : : return -1;
4943 [ # # ]: 0 : if (!ret) {
: : /* Nothing dequeued: sleep briefly and retry; give up after
: :  * 5 consecutive empty polls to avoid spinning forever. */
4944 : 0 : rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
4945 [ # # ]: 0 : if (++empty_loop > 5) {
4946 : 0 : DRV_LOG(WARNING, "No available dequeue %u, quit.", pending_rules);
4947 : 0 : break;
4948 : : }
4949 : 0 : continue;
4950 : : }
4951 [ # # ]: 0 : for (i = 0; i < ret; i++) {
4952 [ # # ]: 0 : if (comp[i].status == RTE_FLOW_OP_ERROR)
4953 : 0 : DRV_LOG(WARNING, "Flow flush get error CQE.")
4954 : : }
4955 : : /*
4956 : : * Indirect **SYNC** METER_MARK and CT actions do not
4957 : : * remove completion after WQE post.
4958 : : * That implementation avoids HW timeout.
4959 : : * The completion is removed before the following WQE post.
4960 : : * However, HWS queue updates do not reflect that behaviour.
4961 : : * Therefore, during port destruction sync queue may have
4962 : : * pending completions.
4963 : : */
: : /* Clamp to avoid wrapping if more completions arrive than
: :  * were counted as pending (see note above). */
4964 : 0 : pending_rules -= RTE_MIN(pending_rules, (uint32_t)ret);
4965 : : empty_loop = 0;
4966 : : }
4967 : : return 0;
4968 : : }
4969 : :
4970 : : /**
4971 : : * Flush created flows.
4972 : : *
4973 : : * @param[in] dev
4974 : : * Pointer to the rte_eth_dev structure.
4975 : : * @param[out] error
4976 : : * Pointer to error structure.
4977 : : *
4978 : : * @return
4979 : : * 0 on success, negative value otherwise and rte_errno is set.
4980 : : */
4981 : : int
4982 : 0 : mlx5_flow_hw_q_flow_flush(struct rte_eth_dev *dev,
4983 : : struct rte_flow_error *error)
4984 : : {
4985 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
4986 : 0 : struct mlx5_hw_q *hw_q = &priv->hw_q[MLX5_DEFAULT_FLUSH_QUEUE];
4987 : : struct rte_flow_template_table *tbl;
4988 : : struct rte_flow_hw *flow;
: : /* Destroy synchronously relative to this function: no postpone, and
: :  * completions are drained in batches below. */
4989 : 0 : struct rte_flow_op_attr attr = {
4990 : : .postpone = 0,
4991 : : };
4992 : : uint32_t pending_rules = 0;
4993 : : uint32_t queue;
4994 : : uint32_t fidx;
4995 : :
4996 : : /*
4997 : : * Ensure to push and dequeue all the enqueued flow
4998 : : * creation/destruction jobs in case user forgot to
4999 : : * dequeue. Or the enqueued created flows will be
5000 : : * leaked. The forgotten dequeues would also cause
5001 : : * flow flush get extra CQEs as expected and pending_rules
5002 : : * be minus value.
5003 : : */
5004 [ # # ]: 0 : for (queue = 0; queue < priv->nb_queue; queue++) {
5005 [ # # ]: 0 : if (__flow_hw_pull_comp(dev, queue, error))
5006 : : return -1;
5007 : : }
5008 : : /* Flush flow per-table from MLX5_DEFAULT_FLUSH_QUEUE. */
5009 [ # # ]: 0 : LIST_FOREACH(tbl, &priv->flow_hw_tbl, next) {
: : /* Only application-created (external) tables are flushed here. */
5010 [ # # ]: 0 : if (!tbl->cfg.external)
5011 : 0 : continue;
5012 [ # # ]: 0 : MLX5_IPOOL_FOREACH(tbl->flow_pool, fidx, flow) {
5013 [ # # ]: 0 : if (flow_hw_async_flow_destroy(dev,
5014 : : MLX5_DEFAULT_FLUSH_QUEUE,
5015 : : &attr,
5016 : : (struct rte_flow *)flow,
5017 : : NULL,
5018 : : error))
5019 : : return -1;
5020 : 0 : pending_rules++;
5021 : : /* Drain completion with queue size. */
5022 [ # # ]: 0 : if (pending_rules >= hw_q->size) {
5023 [ # # ]: 0 : if (__flow_hw_pull_comp(dev,
5024 : : MLX5_DEFAULT_FLUSH_QUEUE,
5025 : : error))
5026 : : return -1;
5027 : : pending_rules = 0;
5028 : : }
5029 : : }
5030 : : }
5031 : : /* Drain left completion. */
5032 [ # # # # ]: 0 : if (pending_rules &&
5033 : 0 : __flow_hw_pull_comp(dev, MLX5_DEFAULT_FLUSH_QUEUE, error))
5034 : 0 : return -1;
5035 : : return 0;
5036 : : }
5037 : :
5038 : : static int
: : /*
: :  * Create the bulk DR actions shared by the multi-pattern templates of
: :  * one table segment:
: :  * - per encap index: an insert-header or reformat action;
: :  * - one modify-header action when header-modify patterns exist.
: :  * On success, when any action was created, records the segment
: :  * capacity (2^bulk_size) and seeds the next segment's head index.
: :  * On failure, destroys whatever was created for this segment.
: :  */
5039 : 0 : mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev,
5040 : : struct rte_flow_template_table *tbl,
5041 : : struct mlx5_multi_pattern_segment *segment,
5042 : : uint32_t bulk_size,
5043 : : struct rte_flow_error *error)
5044 : : {
5045 : : int ret = 0;
5046 : : uint32_t i;
5047 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5048 : : bool unified_fdb = is_unified_fdb(priv);
5049 : : struct mlx5_tbl_multi_pattern_ctx *mpctx = &tbl->mpctx;
5050 : : const struct rte_flow_template_table_attr *table_attr = &tbl->cfg.attr;
5051 : 0 : const struct rte_flow_attr *attr = &table_attr->flow_attr;
5052 : : enum mlx5dr_table_type type =
5053 : 0 : get_mlx5dr_table_type(attr, table_attr->specialize, unified_fdb);
5054 : 0 : uint32_t flags = mlx5_hw_act_flag[!!attr->group][type];
5055 : : struct mlx5dr_action *dr_action = NULL;
5056 : :
5057 [ # # ]: 0 : for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
5058 [ # # ]: 0 : typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + i;
5059 : : enum mlx5dr_action_type reformat_type =
5060 : : mlx5_multi_pattern_reformat_index_to_type(i);
5061 : :
5062 [ # # ]: 0 : if (!reformat->elements_num)
5063 : 0 : continue;
5064 : : dr_action = reformat_type == MLX5DR_ACTION_TYP_INSERT_HEADER ?
5065 : : mlx5dr_action_create_insert_header
5066 : : (priv->dr_ctx, reformat->elements_num,
5067 : : reformat->insert_hdr, bulk_size, flags) :
5068 : 0 : mlx5dr_action_create_reformat
5069 : : (priv->dr_ctx, reformat_type, reformat->elements_num,
5070 : 0 : reformat->reformat_hdr, bulk_size, flags);
5071 [ # # ]: 0 : if (!dr_action) {
5072 : 0 : ret = rte_flow_error_set(error, rte_errno,
5073 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5074 : : NULL,
5075 : : "failed to create multi-pattern encap action");
5076 : 0 : goto error;
5077 : : }
5078 : 0 : segment->reformat_action[i] = dr_action;
5079 : : }
5080 [ # # ]: 0 : if (mpctx->mh.elements_num) {
5081 : : typeof(mpctx->mh) *mh = &mpctx->mh;
5082 : 0 : dr_action = mlx5dr_action_create_modify_header
5083 : 0 : (priv->dr_ctx, mpctx->mh.elements_num, mh->pattern,
5084 : : bulk_size, flags);
5085 [ # # ]: 0 : if (!dr_action) {
5086 : 0 : ret = rte_flow_error_set(error, rte_errno,
5087 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5088 : : NULL, "failed to create multi-pattern header modify action");
5089 : 0 : goto error;
5090 : : }
5091 : 0 : segment->mhdr_action = dr_action;
5092 : : }
: : /* dr_action != NULL means at least one action was created above. */
5093 [ # # ]: 0 : if (dr_action) {
5094 : 0 : segment->capacity = RTE_BIT32(bulk_size);
5095 [ # # ]: 0 : if (segment != &mpctx->segments[MLX5_MAX_TABLE_RESIZE_NUM - 1])
5096 : 0 : segment[1].head_index = segment->head_index + segment->capacity;
5097 : : }
5098 : : return 0;
5099 : 0 : error:
5100 : 0 : mlx5_destroy_multi_pattern_segment(segment);
5101 : 0 : return ret;
5102 : : }
5103 : :
5104 : : static int
: : /*
: :  * Attach @nb_action_templates action templates to @tbl: take a
: :  * reference on each, record it in tbl->ats[], and (only when the port
: :  * is started) translate its actions and populate rule-action caches.
: :  * Finally builds the table's multi-pattern segment 0 when multi-
: :  * pattern is active. On failure all references taken so far are
: :  * released and rte_errno is returned.
: :  */
5105 : 0 : mlx5_hw_build_template_table(struct rte_eth_dev *dev,
5106 : : uint8_t nb_action_templates,
5107 : : struct rte_flow_actions_template *action_templates[],
5108 : : struct mlx5dr_action_template *at[],
5109 : : struct rte_flow_template_table *tbl,
5110 : : struct rte_flow_error *error)
5111 : : {
5112 : : int ret;
5113 : : uint8_t i;
5114 : :
5115 [ # # ]: 0 : for (i = 0; i < nb_action_templates; i++) {
5116 : 0 : uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
5117 : : rte_memory_order_relaxed) + 1;
5118 : :
: : /* refcnt <= 1 after increment means the template was not held
: :  * by its creator anymore — reject it. */
5119 [ # # ]: 0 : if (refcnt <= 1) {
5120 : 0 : rte_flow_error_set(error, EINVAL,
5121 : : RTE_FLOW_ERROR_TYPE_ACTION,
5122 : : &action_templates[i], "invalid AT refcount");
5123 : 0 : goto at_error;
5124 : : }
5125 : 0 : at[i] = action_templates[i]->tmpl;
5126 : 0 : tbl->ats[i].action_template = action_templates[i];
5127 : 0 : LIST_INIT(&tbl->ats[i].acts.act_list);
5128 : : /* do NOT translate table action if `dev` was not started */
5129 [ # # ]: 0 : if (!dev->data->dev_started)
5130 : 0 : continue;
5131 : 0 : ret = flow_hw_translate_actions_template(dev, &tbl->cfg,
5132 : : &tbl->ats[i].acts,
5133 : : action_templates[i],
5134 : : &tbl->mpctx, error);
5135 [ # # ]: 0 : if (ret) {
: : /* Include the current template in the cleanup loop below. */
5136 : 0 : i++;
5137 : 0 : goto at_error;
5138 : : }
5139 : 0 : flow_hw_populate_rule_acts_caches(dev, tbl, i);
5140 : : }
5141 [ # # ]: 0 : tbl->nb_action_templates = nb_action_templates;
5142 [ # # ]: 0 : if (mlx5_is_multi_pattern_active(&tbl->mpctx)) {
5143 [ # # ]: 0 : ret = mlx5_tbl_multi_pattern_process(dev, tbl,
5144 : : &tbl->mpctx.segments[0],
5145 : : rte_log2_u32(tbl->cfg.attr.nb_flows),
5146 : : error);
5147 [ # # ]: 0 : if (ret)
5148 : 0 : goto at_error;
5149 : : }
5150 : : return 0;
5151 : :
5152 : : at_error:
5153 [ # # ]: 0 : while (i--) {
5154 : 0 : __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
5155 : 0 : rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
5156 : : 1, rte_memory_order_relaxed);
5157 : : }
5158 : 0 : return rte_errno;
5159 : : }
5160 : :
5161 : : static bool
: : /*
: :  * Check that a template's domain flags cover the table's domain.
: :  * Assumes exactly one of ingress/egress/transfer is set in
: :  * @table_attr (enforced by flow_hw_validate_table_domain()).
: :  */
5162 : : flow_hw_validate_template_domain(const struct rte_flow_attr *table_attr,
5163 : : uint32_t ingress, uint32_t egress, uint32_t transfer)
5164 : : {
5165 : 0 : if (table_attr->ingress)
5166 : : return ingress != 0;
5167 [ # # # # ]: 0 : else if (table_attr->egress)
5168 : : return egress != 0;
5169 : : else
5170 : 0 : return transfer;
5171 : : }
5172 : :
5173 : : static bool
: : /* A table must belong to exactly one domain: ingress, egress or transfer. */
5174 : : flow_hw_validate_table_domain(const struct rte_flow_attr *table_attr)
5175 : : {
5176 : 0 : return table_attr->ingress + table_attr->egress + table_attr->transfer
5177 : : == 1;
5178 : : }
5179 : :
5180 : : /**
5181 : : * Create flow table.
5182 : : *
5184 : :  * The input item and action templates will be bound to the table.
5184 : : * Flow memory will also be allocated. Matcher will be created based
5185 : : * on the item template. Action will be translated to the dedicated
5186 : : * DR action if possible.
5187 : : *
5188 : : * @param[in] dev
5189 : : * Pointer to the rte_eth_dev structure.
5190 : : * @param[in] table_cfg
5191 : : * Pointer to the table configuration.
5192 : : * @param[in] item_templates
5193 : :  * Item template array to be bound to the table.
5194 : :  * @param[in] nb_item_templates
5195 : :  * Number of item templates.
5196 : : * @param[in] action_templates
5197 : :  * Action template array to be bound to the table.
5198 : : * @param[in] nb_action_templates
5199 : : * Number of action template.
5200 : : * @param[out] error
5201 : : * Pointer to error structure.
5202 : : *
5203 : : * @return
5204 : : * Table on success, NULL otherwise and rte_errno is set.
5205 : : */
5206 : : static struct rte_flow_template_table *
5207 : 0 : flow_hw_table_create(struct rte_eth_dev *dev,
5208 : : const struct mlx5_flow_template_table_cfg *table_cfg,
5209 : : struct rte_flow_pattern_template *item_templates[],
5210 : : uint8_t nb_item_templates,
5211 : : struct rte_flow_actions_template *action_templates[],
5212 : : uint8_t nb_action_templates,
5213 : : struct rte_flow_error *error)
5214 : : {
: : /* Collects detailed errors from callbacks; copied to *error at exit. */
5215 : 0 : struct rte_flow_error sub_error = {
5216 : : .type = RTE_FLOW_ERROR_TYPE_NONE,
5217 : : .cause = NULL,
5218 : : .message = NULL,
5219 : : };
5220 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
5221 : : bool unified_fdb = is_unified_fdb(priv);
5222 : 0 : struct mlx5dr_matcher_attr matcher_attr = {0};
5223 : 0 : struct mlx5dr_action_jump_to_matcher_attr jump_attr = {
5224 : : .type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX,
5225 : : .matcher = NULL,
5226 : : };
5227 : : struct rte_flow_template_table *tbl = NULL;
5228 : : struct mlx5_flow_group *grp;
5229 : : struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
5230 : : struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
5231 : 0 : const struct rte_flow_template_table_attr *attr = &table_cfg->attr;
5232 : 0 : struct rte_flow_attr flow_attr = attr->flow_attr;
5233 : 0 : uint32_t specialize = table_cfg->attr.specialize;
5234 : 0 : struct mlx5_flow_cb_ctx ctx = {
5235 : : .dev = dev,
5236 : : .error = &sub_error,
5237 : : .data = &flow_attr,
5238 : : .data2 = &specialize,
5239 : : };
5240 : 0 : struct mlx5_indexed_pool_config cfg = {
5241 : : .trunk_size = 1 << 12,
5242 : : .per_core_cache = 1 << 13,
5243 : : .need_lock = 1,
5244 : 0 : .release_mem_en = !!priv->sh->config.reclaim_mode,
5245 : : .malloc = mlx5_malloc,
5246 : : .free = mlx5_free,
5247 : : .type = "mlx5_hw_table_flow",
5248 : : };
5249 : : struct mlx5_list_entry *ge;
5250 : : uint32_t i = 0, max_tpl = MLX5_HW_TBL_MAX_ITEM_TEMPLATE;
5251 [ # # ]: 0 : uint32_t nb_flows = rte_align32pow2(attr->nb_flows);
5252 : 0 : bool port_started = !!dev->data->dev_started;
5253 : : bool rpool_needed;
5254 : : size_t tbl_mem_size;
5255 : : enum mlx5dr_table_type table_type;
5256 : : int err;
5257 : :
5258 [ # # ]: 0 : if (!flow_hw_validate_table_domain(&attr->flow_attr)) {
5259 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
5260 : : NULL, "invalid table domain attributes");
5261 : 0 : return NULL;
5262 : : }
5263 [ # # ]: 0 : for (i = 0; i < nb_item_templates; i++) {
5264 : : const struct rte_flow_pattern_template_attr *pt_attr =
5265 : 0 : &item_templates[i]->attr;
5266 : : bool match = flow_hw_validate_template_domain(&attr->flow_attr,
5267 : 0 : pt_attr->ingress,
5268 : 0 : pt_attr->egress,
5269 [ # # ]: 0 : pt_attr->transfer);
5270 [ # # ]: 0 : if (!match) {
5271 : 0 : rte_flow_error_set(error, EINVAL,
5272 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5273 : : NULL, "pattern template domain does not match table");
5274 : 0 : return NULL;
5275 : : }
: : /* ECPRI matching needs a flex parser profile; allocate lazily. */
5276 [ # # # # ]: 0 : if (item_templates[i]->item_flags & MLX5_FLOW_LAYER_ECPRI &&
5277 : 0 : !mlx5_flex_parser_ecpri_exist(dev))
5278 [ # # ]: 0 : if (mlx5_flex_parser_ecpri_alloc(dev)) {
5279 : 0 : rte_flow_error_set(error, EIO,
5280 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5281 : : NULL,
5282 : : "failed to create Flex parser "
5283 : : "profile for ECPRI");
5284 : 0 : goto error;
5285 : : }
5286 : : }
5287 [ # # ]: 0 : for (i = 0; i < nb_action_templates; i++) {
5288 : 0 : const struct rte_flow_actions_template *at = action_templates[i];
5289 : : bool match = flow_hw_validate_template_domain(&attr->flow_attr,
5290 : 0 : at->attr.ingress,
5291 : 0 : at->attr.egress,
5292 [ # # ]: 0 : at->attr.transfer);
5293 [ # # ]: 0 : if (!match) {
5294 : 0 : rte_flow_error_set(error, EINVAL,
5295 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5296 : : NULL, "action template domain does not match table");
5297 : 0 : return NULL;
5298 : : }
5299 : : }
5300 : : /* HWS layer accepts only 1 item template with root table. */
5301 [ # # ]: 0 : if (!attr->flow_attr.group)
5302 : : max_tpl = 1;
5303 : 0 : cfg.max_idx = nb_flows;
5304 [ # # ]: 0 : cfg.size = !rte_flow_template_table_resizable(dev->data->port_id, attr) ?
5305 : : mlx5_flow_hw_entry_size() :
5306 : : mlx5_flow_hw_auxed_entry_size();
5307 : : /* For tables with very few flows, disable the per-core cache. */
5308 [ # # ]: 0 : if (nb_flows < cfg.trunk_size) {
5309 : 0 : cfg.per_core_cache = 0;
5310 : 0 : cfg.trunk_size = nb_flows;
5311 [ # # ]: 0 : } else if (nb_flows <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
5312 : 0 : cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
5313 : : }
5314 : : /* Check whether too many templates were requested. */
5315 [ # # # # ]: 0 : if (nb_item_templates > max_tpl ||
5316 : : nb_action_templates > MLX5_HW_TBL_MAX_ACTION_TEMPLATE) {
5317 : 0 : rte_errno = EINVAL;
5318 : 0 : goto error;
5319 : : }
5320 : : /*
5321 : : * Amount of memory required for rte_flow_template_table struct:
5322 : : * - Size of the struct itself.
5323 : : * - VLA of DR rule action containers at the end =
5324 : : * number of actions templates * number of queues * size of DR rule actions container.
5325 : : */
5326 : : tbl_mem_size = sizeof(*tbl);
5327 : 0 : tbl_mem_size += nb_action_templates * priv->nb_queue * sizeof(tbl->rule_acts[0]);
5328 : : /* Allocate the table memory. */
5329 : 0 : tbl = mlx5_malloc(MLX5_MEM_ZERO, tbl_mem_size, RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
5330 [ # # ]: 0 : if (!tbl)
5331 : 0 : goto error;
5332 : 0 : tbl->cfg = *table_cfg;
5333 : : /* Allocate flow indexed pool. */
5334 : 0 : tbl->flow_pool = mlx5_ipool_create(&cfg);
5335 [ # # ]: 0 : if (!tbl->flow_pool)
5336 : 0 : goto error;
5337 : : /* Allocate table of auxiliary flow rule structs. */
5338 : 0 : tbl->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux) * nb_flows,
5339 : : RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
5340 [ # # ]: 0 : if (!tbl->flow_aux)
5341 : 0 : goto error;
5342 : : /* Register the flow group. */
5343 : 0 : ge = mlx5_hlist_register(priv->sh->groups, attr->flow_attr.group, &ctx);
5344 [ # # ]: 0 : if (!ge)
5345 : 0 : goto error;
5346 : : grp = container_of(ge, struct mlx5_flow_group, entry);
5347 : : /* Verify unified fdb sub domains consistency */
5348 : 0 : table_type = get_mlx5dr_table_type(&flow_attr, specialize, unified_fdb);
5349 [ # # ]: 0 : if (table_type != grp->type) {
5350 : 0 : DRV_LOG(ERR, "Table type (%u) does not match group id (%u) type (%u)",
5351 : : table_type, grp->group_id, grp->type);
5352 : 0 : rte_errno = EINVAL;
5353 : 0 : goto error;
5354 : : }
5355 : 0 : tbl->grp = grp;
5356 : : /* Prepare matcher information. */
5357 : 0 : matcher_attr.resizable = !!rte_flow_template_table_resizable
5358 : 0 : (dev->data->port_id, &table_cfg->attr);
5359 : 0 : matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
5360 : 0 : matcher_attr.priority = attr->flow_attr.priority;
5361 : 0 : matcher_attr.optimize_using_rule_idx = true;
5362 : 0 : matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_RULE;
5363 [ # # ]: 0 : matcher_attr.insert_mode = flow_hw_matcher_insert_mode_get(attr->insertion_type);
5364 [ # # ]: 0 : if (matcher_attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
5365 [ # # ]: 0 : if (attr->insertion_type == RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN) {
5366 : 0 : matcher_attr.isolated = true;
5367 : 0 : matcher_attr.match_mode = MLX5DR_MATCHER_MATCH_MODE_DEFAULT;
5368 : : } else {
5369 : 0 : matcher_attr.isolated = false;
5370 : 0 : matcher_attr.match_mode = MLX5DR_MATCHER_MATCH_MODE_ALWAYS_HIT;
5371 : : }
5372 : : }
5373 [ # # ]: 0 : if (attr->hash_func == RTE_FLOW_TABLE_HASH_FUNC_CRC16) {
5374 : 0 : DRV_LOG(ERR, "16-bit checksum hash type is not supported");
5375 : 0 : rte_errno = ENOTSUP;
5376 : 0 : goto it_error;
5377 : : }
5378 [ # # ]: 0 : matcher_attr.distribute_mode = flow_hw_matcher_distribute_mode_get(attr->hash_func);
5379 : 0 : matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
5380 : : /* Parse hints information. */
5381 [ # # ]: 0 : if (attr->specialize) {
5382 : : uint32_t val = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG |
5383 : : RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
5384 : :
: : /* WIRE_ORIG and VPORT_ORIG hints are mutually exclusive. */
5385 [ # # ]: 0 : if ((attr->specialize & val) == val) {
5386 : 0 : DRV_LOG(ERR, "Invalid hint value %x",
5387 : : attr->specialize);
5388 : 0 : rte_errno = EINVAL;
5389 : 0 : goto it_error;
5390 : : }
5391 [ # # ]: 0 : if (attr->specialize &
5392 : : RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG) {
5393 : 0 : matcher_attr.optimize_flow_src =
5394 : : MLX5DR_MATCHER_FLOW_SRC_WIRE;
5395 [ # # ]: 0 : } else if (attr->specialize &
5396 : : RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG) {
5397 : 0 : matcher_attr.optimize_flow_src =
5398 : : MLX5DR_MATCHER_FLOW_SRC_VPORT;
5399 : : }
5400 : : }
5401 : : /* Build the item template. */
5402 [ # # ]: 0 : for (i = 0; i < nb_item_templates; i++) {
5403 : : uint32_t ret;
5404 : :
5405 [ # # # # ]: 0 : if ((flow_attr.ingress && !item_templates[i]->attr.ingress) ||
5406 [ # # # # ]: 0 : (flow_attr.egress && !item_templates[i]->attr.egress) ||
5407 [ # # # # ]: 0 : (flow_attr.transfer && !item_templates[i]->attr.transfer)) {
5408 : 0 : DRV_LOG(ERR, "pattern template and template table attribute mismatch");
5409 : 0 : rte_errno = EINVAL;
5410 : 0 : goto it_error;
5411 : : }
5412 [ # # ]: 0 : if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
5413 : 0 : matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
5414 : 0 : ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
5415 : : rte_memory_order_relaxed) + 1;
: : /* ret <= 1 after increment means the template was released. */
5416 [ # # ]: 0 : if (ret <= 1) {
5417 : 0 : rte_errno = EINVAL;
5418 : 0 : goto it_error;
5419 : : }
5420 : 0 : mt[i] = item_templates[i]->mt;
5421 : 0 : tbl->its[i] = item_templates[i];
5422 : : }
5423 : 0 : tbl->nb_item_templates = nb_item_templates;
5424 : : /* Build the action template. */
5425 : 0 : err = mlx5_hw_build_template_table(dev, nb_action_templates,
5426 : : action_templates, at, tbl, &sub_error);
5427 [ # # ]: 0 : if (err) {
5428 : : i = nb_item_templates;
5429 : 0 : goto it_error;
5430 : : }
5431 : 0 : tbl->matcher_info[0].matcher = mlx5dr_matcher_create
5432 : 0 : (tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
5433 [ # # ]: 0 : if (!tbl->matcher_info[0].matcher)
5434 : 0 : goto at_error;
5435 : 0 : tbl->matcher_attr = matcher_attr;
5436 : 0 : tbl->type = table_type;
5437 [ # # ]: 0 : if (matcher_attr.isolated) {
5438 : 0 : jump_attr.matcher = tbl->matcher_info[0].matcher;
5439 : 0 : tbl->matcher_info[0].jump = mlx5dr_action_create_jump_to_matcher(priv->dr_ctx,
5440 : 0 : &jump_attr, mlx5_hw_act_flag[!!attr->flow_attr.group][tbl->type]);
5441 [ # # ]: 0 : if (!tbl->matcher_info[0].jump)
5442 : 0 : goto jtm_error;
5443 : : }
5444 : : /*
5445 : : * Only the matcher supports update and needs more than 1 WQE, an additional
5446 : : * index is needed. Or else the flow index can be reused.
5447 : : */
5448 [ # # # # ]: 0 : rpool_needed = mlx5dr_matcher_is_updatable(tbl->matcher_info[0].matcher) &&
5449 : 0 : mlx5dr_matcher_is_dependent(tbl->matcher_info[0].matcher);
5450 [ # # ]: 0 : if (rpool_needed) {
5451 : : /* Allocate rule indexed pool. */
5452 : 0 : cfg.size = 0;
5453 : 0 : cfg.type = "mlx5_hw_table_rule";
5454 : 0 : cfg.max_idx += priv->hw_q[0].size;
5455 : 0 : tbl->resource = mlx5_ipool_create(&cfg);
5456 [ # # ]: 0 : if (!tbl->resource)
5457 : 0 : goto res_error;
5458 : : }
: : /* Tables created before port start are parked on the "ongoing"
: :  * list; mlx5_flow_hw_table_update() activates them later. */
5459 [ # # ]: 0 : if (port_started)
5460 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
5461 : : else
5462 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
5463 : : rte_rwlock_init(&tbl->matcher_replace_rwlk);
5464 : 0 : return tbl;
: : /* Error unwind: labels fall through in reverse creation order. */
5465 : : res_error:
5466 [ # # ]: 0 : if (tbl->matcher_info[0].jump)
5467 : 0 : mlx5dr_action_destroy(tbl->matcher_info[0].jump);
5468 : 0 : jtm_error:
5469 [ # # ]: 0 : if (tbl->matcher_info[0].matcher)
5470 : 0 : (void)mlx5dr_matcher_destroy(tbl->matcher_info[0].matcher);
5471 : 0 : at_error:
5472 [ # # ]: 0 : for (i = 0; i < nb_action_templates; i++) {
5473 : 0 : __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
5474 : 0 : rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
5475 : : 1, rte_memory_order_relaxed);
5476 : : }
5477 : : i = nb_item_templates;
5478 : : it_error:
5479 [ # # ]: 0 : while (i--)
5480 : 0 : rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
5481 : : 1, rte_memory_order_relaxed);
5482 : 0 : error:
5483 : 0 : err = rte_errno;
5484 [ # # ]: 0 : if (tbl) {
5485 [ # # ]: 0 : if (tbl->grp)
5486 : 0 : mlx5_hlist_unregister(priv->sh->groups,
5487 : : &tbl->grp->entry);
5488 [ # # ]: 0 : if (tbl->flow_aux)
5489 : 0 : mlx5_free(tbl->flow_aux);
5490 [ # # ]: 0 : if (tbl->flow_pool)
5491 : 0 : mlx5_ipool_destroy(tbl->flow_pool);
5492 : 0 : mlx5_free(tbl);
5493 : : }
5494 [ # # ]: 0 : if (error != NULL) {
5495 [ # # ]: 0 : if (sub_error.type == RTE_FLOW_ERROR_TYPE_NONE)
5496 : 0 : rte_flow_error_set(error, err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5497 : : "Failed to create template table");
5498 : : else
5499 : : rte_memcpy(error, &sub_error, sizeof(sub_error));
5500 : : }
5501 : : return NULL;
5502 : : }
5503 : :
5504 : : /**
5505 : : * Update flow template table.
5506 : : *
5507 : : * @param[in] dev
5508 : : * Pointer to the rte_eth_dev structure.
5509 : : * @param[out] error
5510 : : * Pointer to error structure.
5511 : : *
5512 : : * @return
5513 : : * 0 on success, negative value otherwise and rte_errno is set.
5514 : : */
5515 : : int
5516 : 0 : mlx5_flow_hw_table_update(struct rte_eth_dev *dev,
5517 : : struct rte_flow_error *error)
5518 : : {
5519 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5520 : : struct rte_flow_template_table *tbl;
5521 : :
: : /* Tables created before port start were parked on the "ongoing"
: :  * list with untranslated action templates; translate them now and
: :  * move each table onto the active list. */
5522 [ # # ]: 0 : while ((tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo)) != NULL) {
5523 [ # # ]: 0 : if (flow_hw_translate_all_actions_templates(dev, tbl, error))
5524 : : return -1;
5525 [ # # ]: 0 : LIST_REMOVE(tbl, next);
5526 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
5527 : : }
5528 : : return 0;
5529 : : }
5530 : :
5531 : : static inline int
: : /*
: :  * Translate a user-visible group index into the internal group index.
: :  * When the PMD reserves group 0 for its own default rules (E-Switch
: :  * FDB default rules for external transfer flows, extended-metadata
: :  * egress rules, or cross-GVMI metadata Tx copy rules), external flows
: :  * are shifted up by one group; otherwise the group is used as-is.
: :  * Returns 0 on success, negative errno with @error set on overflow.
: :  */
5532 : 0 : __translate_group(struct rte_eth_dev *dev,
5533 : : const struct rte_flow_attr *flow_attr,
5534 : : bool external,
5535 : : uint32_t group,
5536 : : uint32_t *table_group,
5537 : : struct rte_flow_error *error)
5538 : : {
5539 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5540 : 0 : struct mlx5_sh_config *config = &priv->sh->config;
5541 : :
5542 [ # # ]: 0 : if (config->dv_esw_en &&
5543 [ # # # # ]: 0 : priv->fdb_def_rule &&
5544 [ # # ]: 0 : external &&
5545 : : flow_attr->transfer) {
5546 [ # # ]: 0 : if (group > MLX5_HW_MAX_TRANSFER_GROUP)
5547 : 0 : return rte_flow_error_set(error, EINVAL,
5548 : : RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5549 : : NULL,
5550 : : "group index not supported");
5551 : 0 : *table_group = group + 1;
5552 [ # # ]: 0 : } else if (config->dv_esw_en &&
5553 [ # # ]: 0 : config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
5554 [ # # ]: 0 : external &&
5555 : : flow_attr->egress) {
5556 : : /*
5557 : : * On E-Switch setups, default egress flow rules are inserted to allow
5558 : : * representor matching and/or preserving metadata across steering domains.
5559 : : * These flow rules are inserted in group 0 and this group is reserved by PMD
5560 : : * for these purposes.
5561 : : *
5562 : : * As a result, if representor matching or extended metadata mode is enabled,
5563 : : * group provided by the user must be incremented to avoid inserting flow rules
5564 : : * in group 0.
5565 : : */
5566 [ # # ]: 0 : if (group > MLX5_HW_MAX_EGRESS_GROUP)
5567 : 0 : return rte_flow_error_set(error, EINVAL,
5568 : : RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5569 : : NULL,
5570 : : "group index not supported");
5571 : 0 : *table_group = group + 1;
5572 [ # # # # ]: 0 : } else if (mlx5_vport_tx_metadata_passing_enabled(priv->sh) &&
5573 [ # # ]: 0 : flow_attr->egress && external) {
5574 : : /*
5575 : : * If VM cross GVMI metadata Tx was enabled, PMD creates a default
5576 : : * flow rule in the group 0 to copy metadata value.
5577 : : */
5578 [ # # ]: 0 : if (group > MLX5_HW_MAX_EGRESS_GROUP)
5579 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5580 : : NULL, "group index not supported")
5581 : 0 : *table_group = group + 1;
5582 : : } else {
5583 : 0 : *table_group = group;
5584 : : }
5585 : : return 0;
5586 : : }
5587 : :
5588 : : /**
5589 : : * Translates group index specified by the user in @p attr to internal
5590 : : * group index.
5591 : : *
5592 : : * Translation is done by incrementing group index, so group n becomes n + 1.
5593 : : *
5594 : : * @param[in] dev
5595 : : * Pointer to Ethernet device.
5596 : : * @param[in] cfg
5597 : : * Pointer to the template table configuration.
5598 : : * @param[in] group
5599 : : * Currently used group index (table group or jump destination).
5600 : : * @param[out] table_group
5601 : : * Pointer to output group index.
5602 : : * @param[out] error
5603 : : * Pointer to error structure.
5604 : : *
5605 : : * @return
5606 : : * 0 on success. Otherwise, returns negative error code, rte_errno is set
5607 : : * and error structure is filled.
5608 : : */
5609 : : static int
5610 : : flow_hw_translate_group(struct rte_eth_dev *dev,
5611 : : const struct mlx5_flow_template_table_cfg *cfg,
5612 : : uint32_t group,
5613 : : uint32_t *table_group,
5614 : : struct rte_flow_error *error)
5615 : : {
5616 : 0 : const struct rte_flow_attr *flow_attr = &cfg->attr.flow_attr;
5617 : :
5618 : 0 : return __translate_group(dev, flow_attr, cfg->external, group, table_group, error);
5619 : : }
5620 : :
5621 : : /**
5622 : : * Create flow table.
5623 : : *
5624 : : * This function is a wrapper over @ref flow_hw_table_create(), which translates parameters
5625 : : * provided by user to proper internal values.
5626 : : *
5627 : : * @param[in] dev
5628 : : * Pointer to Ethernet device.
5629 : : * @param[in] attr
5630 : : * Pointer to the table attributes.
5631 : : * @param[in] item_templates
5632 : : * Item template array to be binded to the table.
5633 : : * @param[in] nb_item_templates
5634 : : * Number of item templates.
5635 : : * @param[in] action_templates
5636 : : * Action template array to be binded to the table.
5637 : : * @param[in] nb_action_templates
5638 : : * Number of action templates.
5639 : : * @param[out] error
5640 : : * Pointer to error structure.
5641 : : *
5642 : : * @return
5643 : : * Table on success, Otherwise, returns negative error code, rte_errno is set
5644 : : * and error structure is filled.
5645 : : */
5646 : : static struct rte_flow_template_table *
5647 : 0 : flow_hw_template_table_create(struct rte_eth_dev *dev,
5648 : : const struct rte_flow_template_table_attr *attr,
5649 : : struct rte_flow_pattern_template *item_templates[],
5650 : : uint8_t nb_item_templates,
5651 : : struct rte_flow_actions_template *action_templates[],
5652 : : uint8_t nb_action_templates,
5653 : : struct rte_flow_error *error)
5654 : : {
5655 : 0 : struct mlx5_flow_template_table_cfg cfg = {
5656 : : .attr = *attr,
5657 : : .external = true,
5658 : : };
5659 : 0 : uint32_t group = attr->flow_attr.group;
5660 : :
5661 [ # # ]: 0 : if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
5662 : : return NULL;
5663 [ # # # # ]: 0 : if (!cfg.attr.flow_attr.group &&
5664 : 0 : rte_flow_template_table_resizable(dev->data->port_id, attr)) {
5665 : 0 : rte_flow_error_set(error, EINVAL,
5666 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5667 : : "table cannot be resized: invalid group");
5668 : 0 : return NULL;
5669 : : }
5670 : 0 : return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
5671 : : action_templates, nb_action_templates, error);
5672 : : }
5673 : :
5674 : : static void
5675 : 0 : mlx5_destroy_multi_pattern_segment(struct mlx5_multi_pattern_segment *segment)
5676 : : {
5677 : : int i;
5678 : :
5679 [ # # ]: 0 : if (segment->mhdr_action)
5680 : 0 : mlx5dr_action_destroy(segment->mhdr_action);
5681 [ # # ]: 0 : for (i = 0; i < MLX5_MULTIPATTERN_ENCAP_NUM; i++) {
5682 [ # # ]: 0 : if (segment->reformat_action[i])
5683 : 0 : mlx5dr_action_destroy(segment->reformat_action[i]);
5684 : : }
5685 : 0 : segment->capacity = 0;
5686 : 0 : }
5687 : :
5688 : : static void
5689 : : flow_hw_destroy_table_multi_pattern_ctx(struct rte_flow_template_table *table)
5690 : : {
5691 : : int sx;
5692 : :
5693 [ # # ]: 0 : for (sx = 0; sx < MLX5_MAX_TABLE_RESIZE_NUM; sx++)
5694 : 0 : mlx5_destroy_multi_pattern_segment(table->mpctx.segments + sx);
5695 : : }
5696 : : /**
5697 : : * Destroy flow table.
5698 : : *
5699 : : * @param[in] dev
5700 : : * Pointer to the rte_eth_dev structure.
5701 : : * @param[in] table
5702 : : * Pointer to the table to be destroyed.
5703 : : * @param[out] error
5704 : : * Pointer to error structure.
5705 : : *
5706 : : * @return
5707 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5708 : : */
5709 : : static int
5710 : 0 : flow_hw_table_destroy(struct rte_eth_dev *dev,
5711 : : struct rte_flow_template_table *table,
5712 : : struct rte_flow_error *error)
5713 : : {
5714 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5715 : : int i;
5716 : 0 : uint32_t fidx = 1;
5717 : 0 : uint32_t ridx = 1;
5718 : :
5719 : : /* Build ipool allocated object bitmap. */
5720 [ # # ]: 0 : if (table->resource)
5721 : 0 : mlx5_ipool_flush_cache(table->resource);
5722 : 0 : mlx5_ipool_flush_cache(table->flow_pool);
5723 : : /* Check if ipool has allocated objects. */
5724 [ # # # # ]: 0 : if (table->refcnt ||
5725 : 0 : mlx5_ipool_get_next(table->flow_pool, &fidx) ||
5726 [ # # # # ]: 0 : (table->resource && mlx5_ipool_get_next(table->resource, &ridx))) {
5727 : 0 : DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
5728 : 0 : return rte_flow_error_set(error, EBUSY,
5729 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5730 : : NULL,
5731 : : "table is in use");
5732 : : }
5733 [ # # ]: 0 : LIST_REMOVE(table, next);
5734 [ # # ]: 0 : for (i = 0; i < table->nb_item_templates; i++)
5735 : 0 : rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
5736 : : 1, rte_memory_order_relaxed);
5737 [ # # ]: 0 : for (i = 0; i < table->nb_action_templates; i++) {
5738 : 0 : __flow_hw_action_template_destroy(dev, &table->ats[i].acts);
5739 : 0 : rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
5740 : : 1, rte_memory_order_relaxed);
5741 : : }
5742 : : flow_hw_destroy_table_multi_pattern_ctx(table);
5743 [ # # ]: 0 : if (table->matcher_info[0].jump)
5744 : 0 : mlx5dr_action_destroy(table->matcher_info[0].jump);
5745 [ # # ]: 0 : if (table->matcher_info[0].matcher)
5746 : 0 : mlx5dr_matcher_destroy(table->matcher_info[0].matcher);
5747 [ # # ]: 0 : if (table->matcher_info[1].jump)
5748 : 0 : mlx5dr_action_destroy(table->matcher_info[1].jump);
5749 [ # # ]: 0 : if (table->matcher_info[1].matcher)
5750 : 0 : mlx5dr_matcher_destroy(table->matcher_info[1].matcher);
5751 : 0 : mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
5752 [ # # ]: 0 : if (table->resource)
5753 : 0 : mlx5_ipool_destroy(table->resource);
5754 : 0 : mlx5_free(table->flow_aux);
5755 : 0 : mlx5_ipool_destroy(table->flow_pool);
5756 : 0 : mlx5_free(table);
5757 : 0 : return 0;
5758 : : }
5759 : :
5760 : : /**
5761 : : * Parse group's miss actions.
5762 : : *
5763 : : * @param[in] dev
5764 : : * Pointer to the rte_eth_dev structure.
5765 : : * @param[in] cfg
5766 : : * Pointer to the table_cfg structure.
5767 : : * @param[in] actions
5768 : : * Array of actions to perform on group miss. Supported types:
5769 : : * RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5770 : : * @param[out] dst_group_id
5771 : : * Pointer to destination group id output. will be set to 0 if actions is END,
5772 : : * otherwise will be set to destination group id.
5773 : : * @param[out] error
5774 : : * Pointer to error structure.
5775 : : *
5776 : : * @return
5777 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5778 : : */
5779 : :
5780 : : static int
5781 : 0 : flow_hw_group_parse_miss_actions(struct rte_eth_dev *dev,
5782 : : struct mlx5_flow_template_table_cfg *cfg,
5783 : : const struct rte_flow_action actions[],
5784 : : uint32_t *dst_group_id,
5785 : : struct rte_flow_error *error)
5786 : : {
5787 : : const struct rte_flow_action_jump *jump_conf;
5788 : 0 : uint32_t temp = 0;
5789 : : uint32_t i;
5790 : :
5791 [ # # ]: 0 : for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
5792 [ # # # ]: 0 : switch (actions[i].type) {
5793 : 0 : case RTE_FLOW_ACTION_TYPE_VOID:
5794 : 0 : continue;
5795 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
5796 [ # # ]: 0 : if (temp)
5797 : 0 : return rte_flow_error_set(error, ENOTSUP,
5798 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, actions,
5799 : : "Miss actions can contain only a single JUMP");
5800 : :
5801 : 0 : jump_conf = (const struct rte_flow_action_jump *)actions[i].conf;
5802 [ # # ]: 0 : if (!jump_conf)
5803 : 0 : return rte_flow_error_set(error, EINVAL,
5804 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5805 : : jump_conf, "Jump conf must not be NULL");
5806 : :
5807 [ # # ]: 0 : if (flow_hw_translate_group(dev, cfg, jump_conf->group, &temp, error))
5808 : 0 : return -rte_errno;
5809 : :
5810 [ # # ]: 0 : if (!temp)
5811 : 0 : return rte_flow_error_set(error, EINVAL,
5812 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5813 : : "Failed to set group miss actions - Invalid target group");
5814 : : break;
5815 : 0 : default:
5816 : 0 : return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5817 : : &actions[i], "Unsupported default miss action type");
5818 : : }
5819 : : }
5820 : :
5821 : 0 : *dst_group_id = temp;
5822 : 0 : return 0;
5823 : : }
5824 : :
5825 : : /**
5826 : : * Set group's miss group.
5827 : : *
5828 : : * @param[in] dev
5829 : : * Pointer to the rte_eth_dev structure.
5830 : : * @param[in] cfg
5831 : : * Pointer to the table_cfg structure.
5832 : : * @param[in] src_grp
5833 : : * Pointer to source group structure.
5834 : : * if NULL, a new group will be created based on group id from cfg->attr.flow_attr.group.
5835 : : * @param[in] dst_grp
5836 : : * Pointer to destination group structure.
5837 : : * @param[out] error
5838 : : * Pointer to error structure.
5839 : : *
5840 : : * @return
5841 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5842 : : */
5843 : :
5844 : : static int
5845 : 0 : flow_hw_group_set_miss_group(struct rte_eth_dev *dev,
5846 : : struct mlx5_flow_template_table_cfg *cfg,
5847 : : struct mlx5_flow_group *src_grp,
5848 : : struct mlx5_flow_group *dst_grp,
5849 : : struct rte_flow_error *error)
5850 : : {
5851 : 0 : struct rte_flow_error sub_error = {
5852 : : .type = RTE_FLOW_ERROR_TYPE_NONE,
5853 : : .cause = NULL,
5854 : : .message = NULL,
5855 : : };
5856 : 0 : struct mlx5_flow_cb_ctx ctx = {
5857 : : .dev = dev,
5858 : : .error = &sub_error,
5859 : 0 : .data = &cfg->attr.flow_attr,
5860 : 0 : .data2 = &cfg->attr.specialize,
5861 : : };
5862 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5863 : : struct mlx5_list_entry *ge;
5864 : : bool ref = false;
5865 : : int ret;
5866 : :
5867 [ # # ]: 0 : if (!dst_grp)
5868 : : return -EINVAL;
5869 : :
5870 : : /* If group doesn't exist - needs to be created. */
5871 [ # # ]: 0 : if (!src_grp) {
5872 : 0 : ge = mlx5_hlist_register(priv->sh->groups, cfg->attr.flow_attr.group, &ctx);
5873 [ # # ]: 0 : if (!ge)
5874 : 0 : return -rte_errno;
5875 : :
5876 : : src_grp = container_of(ge, struct mlx5_flow_group, entry);
5877 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5878 : : ref = true;
5879 [ # # ]: 0 : } else if (!src_grp->miss_group) {
5880 : : /* If group exists, but has no miss actions - need to increase ref_cnt. */
5881 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_grp, src_grp, next);
5882 : 0 : src_grp->entry.ref_cnt++;
5883 : : ref = true;
5884 : : }
5885 : :
5886 : 0 : ret = mlx5dr_table_set_default_miss(src_grp->tbl, dst_grp->tbl);
5887 [ # # ]: 0 : if (ret)
5888 : 0 : goto mlx5dr_error;
5889 : :
5890 : : /* If group existed and had old miss actions - ref_cnt is already correct.
5891 : : * However, need to reduce ref counter for old miss group.
5892 : : */
5893 [ # # ]: 0 : if (src_grp->miss_group)
5894 : 0 : mlx5_hlist_unregister(priv->sh->groups, &src_grp->miss_group->entry);
5895 : :
5896 : 0 : src_grp->miss_group = dst_grp;
5897 : 0 : return 0;
5898 : :
5899 : : mlx5dr_error:
5900 : : /* Reduce src_grp ref_cnt back & remove from grp list in case of mlx5dr error */
5901 [ # # ]: 0 : if (ref) {
5902 : 0 : mlx5_hlist_unregister(priv->sh->groups, &src_grp->entry);
5903 [ # # ]: 0 : LIST_REMOVE(src_grp, next);
5904 : : }
5905 : :
5906 : 0 : return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5907 : : "Failed to set group miss actions");
5908 : : }
5909 : :
5910 : : /**
5911 : : * Unset group's miss group.
5912 : : *
5913 : : * @param[in] dev
5914 : : * Pointer to the rte_eth_dev structure.
5915 : : * @param[in] grp
5916 : : * Pointer to group structure.
5917 : : * @param[out] error
5918 : : * Pointer to error structure.
5919 : : *
5920 : : * @return
5921 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5922 : : */
5923 : :
5924 : : static int
5925 : 0 : flow_hw_group_unset_miss_group(struct rte_eth_dev *dev,
5926 : : struct mlx5_flow_group *grp,
5927 : : struct rte_flow_error *error)
5928 : : {
5929 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
5930 : : int ret;
5931 : :
5932 : : /* If group doesn't exist - no need to change anything. */
5933 [ # # ]: 0 : if (!grp)
5934 : : return 0;
5935 : :
5936 : : /* If group exists, but miss actions is already default behavior -
5937 : : * no need to change anything.
5938 : : */
5939 [ # # ]: 0 : if (!grp->miss_group)
5940 : : return 0;
5941 : :
5942 : 0 : ret = mlx5dr_table_set_default_miss(grp->tbl, NULL);
5943 [ # # ]: 0 : if (ret)
5944 : 0 : return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5945 : : "Failed to unset group miss actions");
5946 : :
5947 : 0 : mlx5_hlist_unregister(priv->sh->groups, &grp->miss_group->entry);
5948 : 0 : grp->miss_group = NULL;
5949 : :
5950 [ # # ]: 0 : LIST_REMOVE(grp, next);
5951 : 0 : mlx5_hlist_unregister(priv->sh->groups, &grp->entry);
5952 : :
5953 : 0 : return 0;
5954 : : }
5955 : :
5956 : : /**
5957 : : * Set group miss actions.
5958 : : *
5959 : : * @param[in] dev
5960 : : * Pointer to the rte_eth_dev structure.
5961 : : * @param[in] group_id
5962 : : * Group id.
5963 : : * @param[in] attr
5964 : : * Pointer to group attributes structure.
5965 : : * @param[in] actions
5966 : : * Array of actions to perform on group miss. Supported types:
5967 : : * RTE_FLOW_ACTION_TYPE_JUMP, RTE_FLOW_ACTION_TYPE_VOID, RTE_FLOW_ACTION_TYPE_END.
5968 : : * @param[out] error
5969 : : * Pointer to error structure.
5970 : : *
5971 : : * @return
5972 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
5973 : : */
5974 : :
5975 : : int
5976 : 0 : mlx5_flow_hw_group_set_miss_actions(struct rte_eth_dev *dev,
5977 : : uint32_t group_id,
5978 : : const struct rte_flow_group_attr *attr,
5979 : : const struct rte_flow_action actions[],
5980 : : struct rte_flow_error *error)
5981 : : {
5982 : 0 : struct rte_flow_error sub_error = {
5983 : : .type = RTE_FLOW_ERROR_TYPE_NONE,
5984 : : .cause = NULL,
5985 : : .message = NULL,
5986 : : };
5987 : 0 : struct mlx5_flow_template_table_cfg cfg = {
5988 : : .external = true,
5989 : : .attr = {
5990 : : .flow_attr = {
5991 : : .group = group_id,
5992 : 0 : .ingress = attr->ingress,
5993 : 0 : .egress = attr->egress,
5994 : 0 : .transfer = attr->transfer,
5995 : : },
5996 : : },
5997 : : };
5998 : 0 : struct mlx5_flow_cb_ctx ctx = {
5999 : : .dev = dev,
6000 : : .error = &sub_error,
6001 : : .data = &cfg.attr.flow_attr,
6002 : : .data2 = &cfg.attr.specialize,
6003 : : };
6004 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6005 : : struct mlx5_flow_group *src_grp = NULL;
6006 : : struct mlx5_flow_group *dst_grp = NULL;
6007 : : struct mlx5_list_entry *ge;
6008 : 0 : uint32_t dst_group_id = 0;
6009 : : int ret;
6010 : :
6011 [ # # ]: 0 : if (flow_hw_translate_group(dev, &cfg, group_id, &group_id, error))
6012 : 0 : return -rte_errno;
6013 : :
6014 [ # # ]: 0 : if (!group_id)
6015 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6016 : : NULL, "Failed to set group miss actions - invalid group id");
6017 : :
6018 : 0 : ret = flow_hw_group_parse_miss_actions(dev, &cfg, actions, &dst_group_id, error);
6019 [ # # ]: 0 : if (ret)
6020 : 0 : return -rte_errno;
6021 : :
6022 [ # # ]: 0 : if (dst_group_id == group_id) {
6023 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6024 : : NULL, "Failed to set group miss actions - target group id must differ from group_id");
6025 : : }
6026 : :
6027 : 0 : cfg.attr.flow_attr.group = group_id;
6028 : 0 : ge = mlx5_hlist_lookup(priv->sh->groups, group_id, &ctx);
6029 [ # # ]: 0 : if (ge)
6030 : : src_grp = container_of(ge, struct mlx5_flow_group, entry);
6031 : :
6032 [ # # ]: 0 : if (dst_group_id) {
6033 : : /* Increase ref_cnt for new miss group. */
6034 : 0 : cfg.attr.flow_attr.group = dst_group_id;
6035 : 0 : ge = mlx5_hlist_register(priv->sh->groups, dst_group_id, &ctx);
6036 [ # # ]: 0 : if (!ge)
6037 : 0 : return -rte_errno;
6038 : :
6039 : : dst_grp = container_of(ge, struct mlx5_flow_group, entry);
6040 : :
6041 : 0 : cfg.attr.flow_attr.group = group_id;
6042 : 0 : ret = flow_hw_group_set_miss_group(dev, &cfg, src_grp, dst_grp, error);
6043 [ # # ]: 0 : if (ret)
6044 : 0 : goto error;
6045 : : } else {
6046 : 0 : return flow_hw_group_unset_miss_group(dev, src_grp, error);
6047 : : }
6048 : :
6049 : : return 0;
6050 : :
6051 : : error:
6052 : : if (dst_grp)
6053 : 0 : mlx5_hlist_unregister(priv->sh->groups, &dst_grp->entry);
6054 : 0 : return -rte_errno;
6055 : : }
6056 : :
6057 : : static bool
6058 : : flow_hw_modify_field_is_used(const struct rte_flow_action_modify_field *action,
6059 : : enum rte_flow_field_id field)
6060 : : {
6061 [ # # # # : 0 : return action->src.field == field || action->dst.field == field;
# # # # #
# # # # #
# # # # #
# # # # #
# # # # ]
6062 : : }
6063 : :
6064 : : static bool
6065 : : flow_hw_modify_field_is_geneve_opt(enum rte_flow_field_id field)
6066 : : {
6067 : : return field == RTE_FLOW_FIELD_GENEVE_OPT_TYPE ||
6068 : 0 : field == RTE_FLOW_FIELD_GENEVE_OPT_CLASS ||
6069 : : field == RTE_FLOW_FIELD_GENEVE_OPT_DATA;
6070 : : }
6071 : :
6072 : : static bool
6073 : 0 : flow_hw_modify_field_is_add_dst_valid(const struct rte_flow_action_modify_field *conf)
6074 : : {
6075 [ # # ]: 0 : if (conf->operation != RTE_FLOW_MODIFY_ADD)
6076 : : return true;
6077 [ # # ]: 0 : if (conf->src.field == RTE_FLOW_FIELD_POINTER ||
6078 : : conf->src.field == RTE_FLOW_FIELD_VALUE)
6079 : : return true;
6080 [ # # ]: 0 : switch (conf->dst.field) {
6081 : : case RTE_FLOW_FIELD_IPV4_TTL:
6082 : : case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
6083 : : case RTE_FLOW_FIELD_TCP_SEQ_NUM:
6084 : : case RTE_FLOW_FIELD_TCP_ACK_NUM:
6085 : : case RTE_FLOW_FIELD_TAG:
6086 : : case RTE_FLOW_FIELD_META:
6087 : : case RTE_FLOW_FIELD_FLEX_ITEM:
6088 : : case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
6089 : : case RTE_FLOW_FIELD_IPV4_IHL:
6090 : : case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
6091 : : case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
6092 : : return true;
6093 : : default:
6094 : : break;
6095 : : }
6096 : 0 : return false;
6097 : : }
6098 : :
6099 : : /**
6100 : : * Validate the level value for modify field action.
6101 : : *
6102 : : * @param[in] data
6103 : : * Pointer to the rte_flow_field_data structure either src or dst.
6104 : : * @param[in] inner_supported
6105 : : * Indicator whether inner should be supported.
6106 : : * @param[out] error
6107 : : * Pointer to error structure.
6108 : : *
6109 : : * @return
6110 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
6111 : : */
6112 : : static int
6113 : 0 : flow_hw_validate_modify_field_level(const struct rte_flow_field_data *data,
6114 : : bool inner_supported,
6115 : : struct rte_flow_error *error)
6116 : : {
6117 [ # # # # : 0 : switch ((int)data->field) {
# ]
6118 : : case RTE_FLOW_FIELD_START:
6119 : : case RTE_FLOW_FIELD_VLAN_TYPE:
6120 : : case RTE_FLOW_FIELD_RANDOM:
6121 : : case RTE_FLOW_FIELD_FLEX_ITEM:
6122 : : /*
6123 : : * Level shouldn't be valid since field isn't supported or
6124 : : * doesn't use 'level'.
6125 : : */
6126 : : break;
6127 : : case RTE_FLOW_FIELD_MARK:
6128 : : case RTE_FLOW_FIELD_META:
6129 : : case RTE_FLOW_FIELD_METER_COLOR:
6130 : : case RTE_FLOW_FIELD_HASH_RESULT:
6131 : : /* For meta data fields encapsulation level is don't-care. */
6132 : : break;
6133 : 0 : case RTE_FLOW_FIELD_TAG:
6134 : : case MLX5_RTE_FLOW_FIELD_META_REG:
6135 : : /*
6136 : : * The tag array for RTE_FLOW_FIELD_TAG type is provided using
6137 : : * 'tag_index' field. In old API, it was provided using 'level'
6138 : : * field and it is still supported for backwards compatibility.
6139 : : * Therefore, for meta tag field only, level is matter. It is
6140 : : * taken as tag index when 'tag_index' field isn't set, and
6141 : : * return error otherwise.
6142 : : */
6143 [ # # ]: 0 : if (data->level > 0) {
6144 [ # # ]: 0 : if (data->tag_index > 0)
6145 : 0 : return rte_flow_error_set(error, EINVAL,
6146 : : RTE_FLOW_ERROR_TYPE_ACTION,
6147 : : data,
6148 : : "tag array can be provided using 'level' or 'tag_index' fields, not both");
6149 : 0 : DRV_LOG(WARNING,
6150 : : "tag array provided in 'level' field instead of 'tag_index' field.");
6151 : : }
6152 : : break;
6153 : 0 : case RTE_FLOW_FIELD_MAC_DST:
6154 : : case RTE_FLOW_FIELD_MAC_SRC:
6155 : : case RTE_FLOW_FIELD_MAC_TYPE:
6156 : : case RTE_FLOW_FIELD_IPV4_IHL:
6157 : : case RTE_FLOW_FIELD_IPV4_TOTAL_LEN:
6158 : : case RTE_FLOW_FIELD_IPV4_DSCP:
6159 : : case RTE_FLOW_FIELD_IPV4_ECN:
6160 : : case RTE_FLOW_FIELD_IPV4_TTL:
6161 : : case RTE_FLOW_FIELD_IPV4_SRC:
6162 : : case RTE_FLOW_FIELD_IPV4_DST:
6163 : : case RTE_FLOW_FIELD_IPV6_TRAFFIC_CLASS:
6164 : : case RTE_FLOW_FIELD_IPV6_FLOW_LABEL:
6165 : : case RTE_FLOW_FIELD_IPV6_PAYLOAD_LEN:
6166 : : case RTE_FLOW_FIELD_IPV6_HOPLIMIT:
6167 : : case RTE_FLOW_FIELD_IPV6_SRC:
6168 : : case RTE_FLOW_FIELD_IPV6_DST:
6169 : : case RTE_FLOW_FIELD_TCP_PORT_SRC:
6170 : : case RTE_FLOW_FIELD_TCP_PORT_DST:
6171 : : case RTE_FLOW_FIELD_TCP_FLAGS:
6172 : : case RTE_FLOW_FIELD_TCP_DATA_OFFSET:
6173 : : case RTE_FLOW_FIELD_UDP_PORT_SRC:
6174 : : case RTE_FLOW_FIELD_UDP_PORT_DST:
6175 [ # # ]: 0 : if (data->level > 2)
6176 : 0 : return rte_flow_error_set(error, ENOTSUP,
6177 : : RTE_FLOW_ERROR_TYPE_ACTION,
6178 : : data,
6179 : : "second inner header fields modification is not supported");
6180 [ # # ]: 0 : if (inner_supported)
6181 : : break;
6182 : : /* Fallthrough */
6183 : : case RTE_FLOW_FIELD_VLAN_ID:
6184 : : case RTE_FLOW_FIELD_IPV4_PROTO:
6185 : : case RTE_FLOW_FIELD_IPV6_PROTO:
6186 : : case RTE_FLOW_FIELD_IPV6_DSCP:
6187 : : case RTE_FLOW_FIELD_IPV6_ECN:
6188 : : case RTE_FLOW_FIELD_TCP_SEQ_NUM:
6189 : : case RTE_FLOW_FIELD_TCP_ACK_NUM:
6190 : : case RTE_FLOW_FIELD_ESP_PROTO:
6191 : : case RTE_FLOW_FIELD_ESP_SPI:
6192 : : case RTE_FLOW_FIELD_ESP_SEQ_NUM:
6193 : : case RTE_FLOW_FIELD_VXLAN_VNI:
6194 : : case RTE_FLOW_FIELD_VXLAN_LAST_RSVD:
6195 : : case RTE_FLOW_FIELD_GENEVE_VNI:
6196 : : case RTE_FLOW_FIELD_GENEVE_OPT_TYPE:
6197 : : case RTE_FLOW_FIELD_GENEVE_OPT_CLASS:
6198 : : case RTE_FLOW_FIELD_GENEVE_OPT_DATA:
6199 : : case RTE_FLOW_FIELD_GTP_TEID:
6200 : : case RTE_FLOW_FIELD_GTP_PSC_QFI:
6201 [ # # ]: 0 : if (data->level > 1)
6202 : 0 : return rte_flow_error_set(error, ENOTSUP,
6203 : : RTE_FLOW_ERROR_TYPE_ACTION,
6204 : : data,
6205 : : "inner header fields modification is not supported");
6206 : : break;
6207 : 0 : case RTE_FLOW_FIELD_MPLS:
6208 [ # # ]: 0 : if (data->level == 1)
6209 : 0 : return rte_flow_error_set(error, ENOTSUP,
6210 : : RTE_FLOW_ERROR_TYPE_ACTION,
6211 : : data,
6212 : : "outer MPLS header modification is not supported");
6213 [ # # ]: 0 : if (data->level > 2)
6214 : 0 : return rte_flow_error_set(error, ENOTSUP,
6215 : : RTE_FLOW_ERROR_TYPE_ACTION,
6216 : : data,
6217 : : "inner MPLS header modification is not supported");
6218 : : break;
6219 : 0 : case RTE_FLOW_FIELD_POINTER:
6220 : : case RTE_FLOW_FIELD_VALUE:
6221 : : default:
6222 : : MLX5_ASSERT(false);
6223 : : }
6224 : : return 0;
6225 : : }
6226 : :
6227 : : static int
6228 : 0 : flow_hw_validate_action_modify_field(struct rte_eth_dev *dev,
6229 : : const struct rte_flow_action *action,
6230 : : const struct rte_flow_action *mask,
6231 : : struct rte_flow_error *error)
6232 : : {
6233 : 0 : const struct rte_flow_action_modify_field *action_conf = action->conf;
6234 : 0 : const struct rte_flow_action_modify_field *mask_conf = mask->conf;
6235 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6236 : 0 : struct mlx5_hca_attr *attr = &priv->sh->cdev->config.hca_attr;
6237 : : int ret;
6238 : :
6239 [ # # ]: 0 : if (!mask_conf)
6240 : 0 : return rte_flow_error_set(error, EINVAL,
6241 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6242 : : "modify_field mask conf is missing");
6243 [ # # ]: 0 : if (action_conf->operation != mask_conf->operation)
6244 : 0 : return rte_flow_error_set(error, EINVAL,
6245 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6246 : : "modify_field operation mask and template are not equal");
6247 [ # # ]: 0 : if (action_conf->dst.field != mask_conf->dst.field)
6248 : 0 : return rte_flow_error_set(error, EINVAL,
6249 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6250 : : "destination field mask and template are not equal");
6251 : 0 : if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
6252 [ # # ]: 0 : action_conf->dst.field == RTE_FLOW_FIELD_VALUE ||
6253 : : action_conf->dst.field == RTE_FLOW_FIELD_HASH_RESULT)
6254 : 0 : return rte_flow_error_set(error, EINVAL,
6255 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6256 : : "immediate value, pointer and hash result cannot be used as destination");
6257 : 0 : ret = flow_hw_validate_modify_field_level(&action_conf->dst, false, error);
6258 [ # # ]: 0 : if (ret)
6259 : : return ret;
6260 [ # # # # ]: 0 : if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM &&
6261 : : !flow_hw_modify_field_is_geneve_opt(action_conf->dst.field)) {
6262 [ # # ]: 0 : if (action_conf->dst.tag_index &&
6263 : : !flow_modify_field_support_tag_array(action_conf->dst.field))
6264 : 0 : return rte_flow_error_set(error, EINVAL,
6265 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6266 : : "destination tag index is not supported");
6267 [ # # ]: 0 : if (action_conf->dst.class_id)
6268 : 0 : return rte_flow_error_set(error, EINVAL,
6269 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6270 : : "destination class id is not supported");
6271 : : }
6272 [ # # ]: 0 : if (mask_conf->dst.level != UINT8_MAX)
6273 : 0 : return rte_flow_error_set(error, EINVAL,
6274 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6275 : : "destination encapsulation level must be fully masked");
6276 [ # # ]: 0 : if (mask_conf->dst.offset != UINT32_MAX)
6277 : 0 : return rte_flow_error_set(error, EINVAL,
6278 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6279 : : "destination offset level must be fully masked");
6280 [ # # ]: 0 : if (action_conf->src.field != mask_conf->src.field)
6281 : 0 : return rte_flow_error_set(error, EINVAL,
6282 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6283 : : "destination field mask and template are not equal");
6284 [ # # ]: 0 : if (action_conf->src.field != RTE_FLOW_FIELD_POINTER &&
6285 : : action_conf->src.field != RTE_FLOW_FIELD_VALUE) {
6286 [ # # # # ]: 0 : if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM &&
6287 : : !flow_hw_modify_field_is_geneve_opt(action_conf->src.field)) {
6288 [ # # ]: 0 : if (action_conf->src.tag_index &&
6289 : : !flow_modify_field_support_tag_array(action_conf->src.field))
6290 : 0 : return rte_flow_error_set(error, EINVAL,
6291 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6292 : : "source tag index is not supported");
6293 [ # # ]: 0 : if (action_conf->src.class_id)
6294 : 0 : return rte_flow_error_set(error, EINVAL,
6295 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6296 : : "source class id is not supported");
6297 : : }
6298 [ # # ]: 0 : if (mask_conf->src.level != UINT8_MAX)
6299 : 0 : return rte_flow_error_set(error, EINVAL,
6300 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6301 : : "source encapsulation level must be fully masked");
6302 [ # # ]: 0 : if (mask_conf->src.offset != UINT32_MAX)
6303 : 0 : return rte_flow_error_set(error, EINVAL,
6304 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6305 : : "source offset level must be fully masked");
6306 : 0 : ret = flow_hw_validate_modify_field_level(&action_conf->src, true, error);
6307 [ # # ]: 0 : if (ret)
6308 : : return ret;
6309 : : }
6310 [ # # ]: 0 : if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
6311 [ # # # # ]: 0 : action_conf->dst.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
6312 : 0 : action_conf->dst.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) ||
6313 [ # # ]: 0 : (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
6314 [ # # # # ]: 0 : action_conf->src.tag_index >= MLX5_FLOW_HW_TAGS_MAX &&
6315 : : action_conf->src.tag_index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX))
6316 : 0 : return rte_flow_error_set(error, EINVAL,
6317 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6318 : : "tag index is out of range");
6319 [ # # # # ]: 0 : if ((action_conf->dst.field == RTE_FLOW_FIELD_TAG &&
6320 [ # # # # ]: 0 : flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->dst.tag_index) == REG_NON) ||
6321 [ # # ]: 0 : (action_conf->src.field == RTE_FLOW_FIELD_TAG &&
6322 [ # # ]: 0 : flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, action_conf->src.tag_index) == REG_NON))
6323 : 0 : return rte_flow_error_set(error, EINVAL,
6324 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6325 : : "tag index is out of range");
6326 [ # # ]: 0 : if (mask_conf->width != UINT32_MAX)
6327 : 0 : return rte_flow_error_set(error, EINVAL,
6328 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6329 : : "modify_field width field must be fully masked");
6330 [ # # ]: 0 : if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_START))
6331 : 0 : return rte_flow_error_set(error, EINVAL,
6332 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6333 : : "modifying arbitrary place in a packet is not supported");
6334 [ # # ]: 0 : if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_VLAN_TYPE))
6335 : 0 : return rte_flow_error_set(error, EINVAL,
6336 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6337 : : "modifying vlan_type is not supported");
6338 [ # # ]: 0 : if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_RANDOM))
6339 : 0 : return rte_flow_error_set(error, EINVAL,
6340 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6341 : : "modifying random value is not supported");
6342 : : /**
6343 : : * Geneve VNI modification is supported only when Geneve header is
6344 : : * parsed natively. When GENEVE options are supported, they both Geneve
6345 : : * and options headers are parsed as a flex parser.
6346 : : */
6347 [ # # ]: 0 : if (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_VNI) &&
6348 [ # # ]: 0 : attr->geneve_tlv_opt)
6349 : 0 : return rte_flow_error_set(error, EINVAL,
6350 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6351 : : "modifying Geneve VNI is not supported when GENEVE opt is supported");
6352 [ # # # # ]: 0 : if (priv->tlv_options == NULL &&
6353 [ # # ]: 0 : (flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_TYPE) ||
6354 [ # # ]: 0 : flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_CLASS) ||
6355 : : flow_hw_modify_field_is_used(action_conf, RTE_FLOW_FIELD_GENEVE_OPT_DATA)))
6356 : 0 : return rte_flow_error_set(error, EINVAL,
6357 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6358 : : "modifying Geneve TLV option is supported only after parser configuration");
6359 : : /* Due to HW bug, tunnel MPLS header is read only. */
6360 [ # # ]: 0 : if (action_conf->dst.field == RTE_FLOW_FIELD_MPLS)
6361 : 0 : return rte_flow_error_set(error, EINVAL,
6362 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6363 : : "MPLS cannot be used as destination");
6364 : : /* ADD_FIELD is not supported for all the fields. */
6365 [ # # ]: 0 : if (!flow_hw_modify_field_is_add_dst_valid(action_conf))
6366 : 0 : return rte_flow_error_set(error, EINVAL,
6367 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6368 : : "invalid add_field destination");
6369 : : return 0;
6370 : : }
6371 : :
6372 : : static int
6373 : 0 : flow_hw_validate_action_port_representor(struct rte_eth_dev *dev __rte_unused,
6374 : : const struct rte_flow_actions_template_attr *attr,
6375 : : const struct rte_flow_action *action,
6376 : : const struct rte_flow_action *mask,
6377 : : struct rte_flow_error *error)
6378 : : {
6379 : : const struct rte_flow_action_ethdev *action_conf = NULL;
6380 : : const struct rte_flow_action_ethdev *mask_conf = NULL;
6381 : :
6382 : : /* If transfer is set, port has been validated as proxy port. */
6383 [ # # ]: 0 : if (!attr->transfer)
6384 : 0 : return rte_flow_error_set(error, EINVAL,
6385 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6386 : : "cannot use port_representor actions"
6387 : : " without an E-Switch");
6388 [ # # ]: 0 : if (!action || !mask)
6389 : 0 : return rte_flow_error_set(error, EINVAL,
6390 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6391 : : "actiona and mask configuration must be set");
6392 : 0 : action_conf = action->conf;
6393 : 0 : mask_conf = mask->conf;
6394 [ # # # # : 0 : if (!mask_conf || mask_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR ||
# # ]
6395 [ # # ]: 0 : !action_conf || action_conf->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
6396 : 0 : return rte_flow_error_set(error, EINVAL,
6397 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6398 : : "only eswitch manager port 0xffff is"
6399 : : " supported");
6400 : : return 0;
6401 : : }
6402 : :
6403 : : static int
6404 : 0 : flow_hw_validate_target_port_id(struct rte_eth_dev *dev,
6405 : : uint16_t target_port_id)
6406 : : {
6407 : : struct mlx5_priv *port_priv;
6408 : : struct mlx5_priv *dev_priv;
6409 : :
6410 [ # # ]: 0 : if (target_port_id == MLX5_REPRESENTED_PORT_ESW_MGR)
6411 : : return 0;
6412 : :
6413 : 0 : port_priv = mlx5_port_to_eswitch_info(target_port_id, false);
6414 [ # # ]: 0 : if (!port_priv) {
6415 : 0 : rte_errno = EINVAL;
6416 : 0 : DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for port %u",
6417 : : dev->data->port_id, target_port_id);
6418 : 0 : return -rte_errno;
6419 : : }
6420 : :
6421 : 0 : dev_priv = mlx5_dev_to_eswitch_info(dev);
6422 [ # # ]: 0 : if (!dev_priv) {
6423 : 0 : rte_errno = EINVAL;
6424 : 0 : DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for transfer proxy",
6425 : : dev->data->port_id);
6426 : 0 : return -rte_errno;
6427 : : }
6428 : :
6429 [ # # ]: 0 : if (port_priv->domain_id != dev_priv->domain_id) {
6430 : 0 : rte_errno = EINVAL;
6431 : 0 : DRV_LOG(ERR, "Port %u Failed to obtain E-Switch info for transfer proxy",
6432 : : dev->data->port_id);
6433 : 0 : return -rte_errno;
6434 : : }
6435 : :
6436 : : return 0;
6437 : : }
6438 : :
6439 : : static int
6440 : 0 : flow_hw_validate_action_represented_port(struct rte_eth_dev *dev,
6441 : : const struct rte_flow_action *action,
6442 : : const struct rte_flow_action *mask,
6443 : : struct rte_flow_error *error)
6444 : : {
6445 : 0 : const struct rte_flow_action_ethdev *action_conf = action->conf;
6446 : 0 : const struct rte_flow_action_ethdev *mask_conf = mask->conf;
6447 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6448 : :
6449 [ # # ]: 0 : if (!priv->sh->config.dv_esw_en)
6450 : 0 : return rte_flow_error_set(error, EINVAL,
6451 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6452 : : "cannot use represented_port actions"
6453 : : " without an E-Switch");
6454 [ # # # # ]: 0 : if (mask_conf && mask_conf->port_id) {
6455 [ # # ]: 0 : if (!action_conf)
6456 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
6457 : : action, "port index was not provided");
6458 : :
6459 [ # # ]: 0 : if (flow_hw_validate_target_port_id(dev, action_conf->port_id))
6460 : 0 : return rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
6461 : : action, "port index is invalid");
6462 : : }
6463 : : return 0;
6464 : : }
6465 : :
6466 : : /**
6467 : : * Validate AGE action.
6468 : : *
6469 : : * @param[in] dev
6470 : : * Pointer to rte_eth_dev structure.
6471 : : * @param[in] action
6472 : : * Pointer to the indirect action.
6473 : : * @param[in] action_flags
6474 : : * Holds the actions detected until now.
6475 : : * @param[in] fixed_cnt
6476 : : * Indicator if this list has a fixed COUNT action.
6477 : : * @param[out] error
6478 : : * Pointer to error structure.
6479 : : *
6480 : : * @return
6481 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
6482 : : */
6483 : : static int
6484 : 0 : flow_hw_validate_action_age(struct rte_eth_dev *dev,
6485 : : const struct rte_flow_action *action,
6486 : : uint64_t action_flags, bool fixed_cnt,
6487 : : struct rte_flow_error *error)
6488 : : {
6489 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6490 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
6491 : :
6492 [ # # ]: 0 : if (!priv->sh->cdev->config.devx)
6493 : 0 : return rte_flow_error_set(error, ENOTSUP,
6494 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6495 : : NULL, "AGE action not supported");
6496 [ # # ]: 0 : if (age_info->ages_ipool == NULL)
6497 : 0 : return rte_flow_error_set(error, EINVAL,
6498 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6499 : : "aging pool not initialized");
6500 [ # # ]: 0 : if ((action_flags & MLX5_FLOW_ACTION_AGE) ||
6501 : : (action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
6502 : 0 : return rte_flow_error_set(error, EINVAL,
6503 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6504 : : "duplicate AGE actions set");
6505 [ # # ]: 0 : if (fixed_cnt)
6506 : 0 : return rte_flow_error_set(error, EINVAL,
6507 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6508 : : "AGE and fixed COUNT combination is not supported");
6509 : : return 0;
6510 : : }
6511 : :
6512 : : /**
6513 : : * Validate count action.
6514 : : *
6515 : : * @param[in] dev
6516 : : * Pointer to rte_eth_dev structure.
6517 : : * @param[in] action
6518 : : * Pointer to the indirect action.
6519 : : * @param[in] mask
6520 : : * Pointer to the indirect action mask.
6521 : : * @param[in] action_flags
6522 : : * Holds the actions detected until now.
6523 : : * @param[out] error
6524 : : * Pointer to error structure.
6525 : : *
6526 : : * @return
6527 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
6528 : : */
6529 : : static int
6530 : 0 : flow_hw_validate_action_count(struct rte_eth_dev *dev,
6531 : : const struct rte_flow_action *action,
6532 : : const struct rte_flow_action *mask,
6533 : : uint64_t action_flags,
6534 : : struct rte_flow_error *error)
6535 : : {
6536 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6537 : 0 : const struct rte_flow_action_count *count = mask->conf;
6538 : :
6539 [ # # ]: 0 : if (!priv->sh->cdev->config.devx)
6540 : 0 : return rte_flow_error_set(error, ENOTSUP,
6541 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6542 : : "count action not supported");
6543 [ # # ]: 0 : if (!priv->hws_cpool)
6544 : 0 : return rte_flow_error_set(error, EINVAL,
6545 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6546 : : "counters pool not initialized");
6547 [ # # ]: 0 : if ((action_flags & MLX5_FLOW_ACTION_COUNT) ||
6548 : : (action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT))
6549 : 0 : return rte_flow_error_set(error, EINVAL,
6550 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6551 : : "duplicate count actions set");
6552 [ # # # # : 0 : if (count && count->id && (action_flags & MLX5_FLOW_ACTION_AGE))
# # ]
6553 : 0 : return rte_flow_error_set(error, EINVAL,
6554 : : RTE_FLOW_ERROR_TYPE_ACTION, mask,
6555 : : "AGE and COUNT action shared by mask combination is not supported");
6556 : : return 0;
6557 : : }
6558 : :
6559 : : /**
6560 : : * Validate meter_mark action.
6561 : : *
6562 : : * @param[in] dev
6563 : : * Pointer to rte_eth_dev structure.
6564 : : * @param[in] action
6565 : : * Pointer to the indirect action.
6566 : : * @param[in] indirect
6567 : : * If true, then provided action was passed using an indirect action.
6568 : : * @param[out] error
6569 : : * Pointer to error structure.
6570 : : *
6571 : : * @return
6572 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
6573 : : */
6574 : : static int
6575 : 0 : flow_hw_validate_action_meter_mark(struct rte_eth_dev *dev,
6576 : : const struct rte_flow_action *action,
6577 : : bool indirect,
6578 : : struct rte_flow_error *error)
6579 : : {
6580 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6581 : :
6582 : : RTE_SET_USED(action);
6583 : :
6584 [ # # ]: 0 : if (!priv->sh->cdev->config.devx)
6585 : 0 : return rte_flow_error_set(error, ENOTSUP,
6586 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6587 : : "meter_mark action not supported");
6588 [ # # # # ]: 0 : if (!indirect && priv->shared_host)
6589 : 0 : return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action,
6590 : : "meter_mark action can only be used on host port");
6591 [ # # ]: 0 : if (!priv->hws_mpool)
6592 : 0 : return rte_flow_error_set(error, EINVAL,
6593 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6594 : : "meter_mark pool not initialized");
6595 : : return 0;
6596 : : }
6597 : :
6598 : : /**
6599 : : * Validate indirect action.
6600 : : *
6601 : : * @param[in] dev
6602 : : * Pointer to rte_eth_dev structure.
6603 : : * @param[in] action
6604 : : * Pointer to the indirect action.
6605 : : * @param[in] mask
6606 : : * Pointer to the indirect action mask.
6607 : : * @param[in, out] action_flags
6608 : : * Holds the actions detected until now.
6609 : : * @param[in, out] fixed_cnt
6610 : : * Pointer to indicator if this list has a fixed COUNT action.
6611 : : * @param[out] error
6612 : : * Pointer to error structure.
6613 : : *
6614 : : * @return
6615 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
6616 : : */
6617 : : static int
6618 : 0 : flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
6619 : : const struct rte_flow_action *action,
6620 : : const struct rte_flow_action *mask,
6621 : : uint64_t *action_flags, bool *fixed_cnt,
6622 : : struct rte_flow_error *error)
6623 : : {
6624 : : uint32_t type;
6625 : : int ret;
6626 : :
6627 [ # # ]: 0 : if (!mask)
6628 : 0 : return rte_flow_error_set(error, EINVAL,
6629 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6630 : : "Unable to determine indirect action type without a mask specified");
6631 : 0 : type = mask->type;
6632 [ # # # # : 0 : switch (type) {
# # # ]
6633 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
6634 : 0 : ret = flow_hw_validate_action_meter_mark(dev, mask, true, error);
6635 [ # # ]: 0 : if (ret < 0)
6636 : : return ret;
6637 : 0 : *action_flags |= MLX5_FLOW_ACTION_METER;
6638 : 0 : break;
6639 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
6640 : : /* TODO: Validation logic (same as flow_hw_actions_validate) */
6641 : 0 : *action_flags |= MLX5_FLOW_ACTION_RSS;
6642 : 0 : break;
6643 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6644 : : /* TODO: Validation logic (same as flow_hw_actions_validate) */
6645 : 0 : *action_flags |= MLX5_FLOW_ACTION_CT;
6646 : 0 : break;
6647 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
6648 [ # # # # ]: 0 : if (action->conf && mask->conf) {
6649 [ # # ]: 0 : if ((*action_flags & MLX5_FLOW_ACTION_AGE) ||
6650 : : (*action_flags & MLX5_FLOW_ACTION_INDIRECT_AGE))
6651 : : /*
6652 : : * AGE cannot use indirect counter which is
6653 : : * shared with enother flow rules.
6654 : : */
6655 : 0 : return rte_flow_error_set(error, EINVAL,
6656 : : RTE_FLOW_ERROR_TYPE_ACTION,
6657 : : NULL,
6658 : : "AGE and fixed COUNT combination is not supported");
6659 : 0 : *fixed_cnt = true;
6660 : : }
6661 : 0 : ret = flow_hw_validate_action_count(dev, action, mask,
6662 : : *action_flags, error);
6663 [ # # ]: 0 : if (ret < 0)
6664 : : return ret;
6665 : 0 : *action_flags |= MLX5_FLOW_ACTION_INDIRECT_COUNT;
6666 : 0 : break;
6667 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
6668 [ # # # # ]: 0 : if (action->conf && mask->conf)
6669 : 0 : return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
6670 : : action,
6671 : : "Fixed indirect age action is not supported");
6672 : 0 : ret = flow_hw_validate_action_age(dev, action, *action_flags,
6673 : 0 : *fixed_cnt, error);
6674 [ # # ]: 0 : if (ret < 0)
6675 : : return ret;
6676 : 0 : *action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
6677 : 0 : break;
6678 : 0 : case RTE_FLOW_ACTION_TYPE_QUOTA:
6679 : : /* TODO: add proper quota verification */
6680 : 0 : *action_flags |= MLX5_FLOW_ACTION_QUOTA;
6681 : 0 : break;
6682 : 0 : default:
6683 : 0 : DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
6684 : 0 : return rte_flow_error_set(error, ENOTSUP,
6685 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, mask,
6686 : : "Unsupported indirect action type");
6687 : : }
6688 : : return 0;
6689 : : }
6690 : :
6691 : : /**
6692 : : * Validate ipv6_ext_push action.
6693 : : *
6694 : : * @param[in] dev
6695 : : * Pointer to rte_eth_dev structure.
6696 : : * @param[in] action
6697 : : * Pointer to the indirect action.
6698 : : * @param[out] error
6699 : : * Pointer to error structure.
6700 : : *
6701 : : * @return
6702 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
6703 : : */
6704 : : static int
6705 : 0 : flow_hw_validate_action_ipv6_ext_push(struct rte_eth_dev *dev __rte_unused,
6706 : : const struct rte_flow_action *action,
6707 : : struct rte_flow_error *error)
6708 : : {
6709 : 0 : const struct rte_flow_action_ipv6_ext_push *raw_push_data = action->conf;
6710 : :
6711 [ # # # # : 0 : if (!raw_push_data || !raw_push_data->size || !raw_push_data->data)
# # ]
6712 : 0 : return rte_flow_error_set(error, EINVAL,
6713 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6714 : : "invalid ipv6_ext_push data");
6715 [ # # # # ]: 0 : if (raw_push_data->type != IPPROTO_ROUTING ||
6716 : : raw_push_data->size > MLX5_PUSH_MAX_LEN)
6717 : 0 : return rte_flow_error_set(error, EINVAL,
6718 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
6719 : : "Unsupported ipv6_ext_push type or length");
6720 : : return 0;
6721 : : }
6722 : :
6723 : : /**
6724 : : * Process `... / raw_decap / raw_encap / ...` actions sequence.
6725 : : * The PMD handles the sequence as a single encap or decap reformat action,
6726 : : * depending on the raw_encap configuration.
6727 : : *
6728 : : * The function assumes that the raw_decap / raw_encap location
6729 : : * in actions template list complies with relative HWS actions order:
6730 : : * for the required reformat configuration:
6731 : : * ENCAP configuration must appear before [JUMP|DROP|PORT]
6732 : : * DECAP configuration must appear at the template head.
6733 : : */
6734 : : static uint64_t
6735 : : mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
6736 : : uint32_t encap_ind, uint64_t flags)
6737 : : {
6738 : 0 : const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
6739 : :
6740 [ # # ]: 0 : if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
6741 : : return MLX5_FLOW_ACTION_ENCAP;
6742 [ # # ]: 0 : if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
6743 : : return MLX5_FLOW_ACTION_ENCAP;
6744 : 0 : return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
6745 [ # # ]: 0 : MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
6746 : : }
6747 : :
/*
 * Relative position of an indirect list action with respect to the
 * modify header (MH) action inside an expanded actions template.
 */
enum mlx5_hw_indirect_list_relative_position {
	/* Position cannot be determined for this handle type. */
	MLX5_INDIRECT_LIST_POSITION_UNKNOWN = -1,
	/* Indirect list action must precede the modify header action. */
	MLX5_INDIRECT_LIST_POSITION_BEFORE_MH = 0,
	/* Indirect list action must follow the modify header action. */
	MLX5_INDIRECT_LIST_POSITION_AFTER_MH,
};
6753 : :
6754 : : static enum mlx5_hw_indirect_list_relative_position
6755 : 0 : mlx5_hw_indirect_list_mh_position(const struct rte_flow_action *action)
6756 : : {
6757 : 0 : const struct rte_flow_action_indirect_list *conf = action->conf;
6758 [ # # # # ]: 0 : enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(conf->handle);
6759 : : enum mlx5_hw_indirect_list_relative_position pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6760 : : const union {
6761 : : struct mlx5_indlst_legacy *legacy;
6762 : : struct mlx5_hw_encap_decap_action *reformat;
6763 : : struct rte_flow_action_list_handle *handle;
6764 : : } h = { .handle = conf->handle};
6765 : :
6766 [ # # # # ]: 0 : switch (list_type) {
6767 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
6768 [ # # # ]: 0 : switch (h.legacy->legacy_type) {
6769 : : case RTE_FLOW_ACTION_TYPE_AGE:
6770 : : case RTE_FLOW_ACTION_TYPE_COUNT:
6771 : : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
6772 : : case RTE_FLOW_ACTION_TYPE_METER_MARK:
6773 : : case RTE_FLOW_ACTION_TYPE_QUOTA:
6774 : : pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6775 : : break;
6776 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
6777 : : pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6778 : 0 : break;
6779 : 0 : default:
6780 : : pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6781 : 0 : break;
6782 : : }
6783 : : break;
6784 : : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
6785 : : pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6786 : : break;
6787 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
6788 [ # # # ]: 0 : switch (h.reformat->action_type) {
6789 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2:
6790 : : case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
6791 : : pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH;
6792 : : break;
6793 : 0 : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
6794 : : case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
6795 : : pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH;
6796 : 0 : break;
6797 : 0 : default:
6798 : : pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6799 : 0 : break;
6800 : : }
6801 : : break;
6802 : 0 : default:
6803 : : pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN;
6804 : 0 : break;
6805 : : }
6806 : 0 : return pos;
6807 : : }
6808 : :
6809 : : #define MLX5_HW_EXPAND_MH_FAILED 0xffff
6810 : :
/*
 * Insert the expanded modify-field (MF) actions/masks into the template
 * arrays at the position HWS requires, shifting the tail entries up by
 * mf_num. Returns the insertion index, or MLX5_HW_EXPAND_MH_FAILED when
 * the position of an indirect list action cannot be determined.
 * Caller must guarantee the arrays have room for act_num + mf_num entries.
 */
static inline uint16_t
flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
				     struct rte_flow_action masks[],
				     const struct rte_flow_action *mf_actions,
				     const struct rte_flow_action *mf_masks,
				     uint64_t flags, uint32_t act_num,
				     uint32_t mf_num)
{
	uint32_t i, tail;

	MLX5_ASSERT(actions && masks);
	MLX5_ASSERT(mf_num > 0);
	if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
		/*
		 * Application action template already has Modify Field.
		 * Its location will be used in DR.
		 * Expanded MF action can be added before the END.
		 */
		i = act_num - 1;
		goto insert;
	}
	/**
	 * Locate the first action positioned BEFORE the new MF.
	 *
	 * Search for a place to insert modify header
	 * from the END action backwards:
	 * 1. END is always present in actions array
	 * 2. END location is always at action[act_num - 1]
	 * 3. END always positioned AFTER modify field location
	 *
	 * Relative actions order is the same for RX, TX and FDB.
	 *
	 * Current actions order (draft-3)
	 * @see action_order_arr[]
	 */
	for (i = act_num - 2; (int)i >= 0; i--) {
		enum mlx5_hw_indirect_list_relative_position pos;
		enum rte_flow_action_type type = actions[i].type;
		uint64_t reformat_type;

		/* For indirect actions the concrete type comes from the mask. */
		if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
			type = masks[i].type;
		switch (type) {
		/* Actions that may legally follow modify header: keep scanning. */
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		case RTE_FLOW_ACTION_TYPE_DROP:
		case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
		case RTE_FLOW_ACTION_TYPE_JUMP:
		case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
		case RTE_FLOW_ACTION_TYPE_QUEUE:
		case RTE_FLOW_ACTION_TYPE_RSS:
		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
		case RTE_FLOW_ACTION_TYPE_VOID:
		case RTE_FLOW_ACTION_TYPE_END:
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			reformat_type =
				mlx5_decap_encap_reformat_type(actions, i,
							       flags);
			if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
				/* MF goes right after the decap reformat. */
				i++;
				goto insert;
			}
			/* Skip the paired raw_decap of an encap reformat. */
			if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
				i--;
			break;
		case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
			pos = mlx5_hw_indirect_list_mh_position(&actions[i]);
			if (pos == MLX5_INDIRECT_LIST_POSITION_UNKNOWN)
				return MLX5_HW_EXPAND_MH_FAILED;
			if (pos == MLX5_INDIRECT_LIST_POSITION_BEFORE_MH)
				goto insert;
			break;
		default:
			i++; /* new MF inserted AFTER actions[i] */
			goto insert;
		}
	}
	i = 0;
insert:
	tail = act_num - i; /* num action to move */
	/* Shift tail up, then copy the MF entries into the gap - both arrays. */
	memmove(actions + i + mf_num, actions + i, sizeof(actions[0]) * tail);
	memcpy(actions + i, mf_actions, sizeof(actions[0]) * mf_num);
	memmove(masks + i + mf_num, masks + i, sizeof(masks[0]) * tail);
	memcpy(masks + i, mf_masks, sizeof(masks[0]) * mf_num);
	return i;
}
6900 : :
/*
 * Validate the OF_PUSH_VLAN action sequence in an actions template.
 * The mandatory order is OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ],
 * and the masked/unmasked state of VID (and PCP if present) must match that
 * of the PUSH_VLAN ethertype.
 */
static int
flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev,
				  const
				  struct rte_flow_actions_template_attr *attr,
				  const struct rte_flow_action *action,
				  const struct rte_flow_action *mask,
				  struct rte_flow_error *error)
{
/* True when (ptr)->conf is set and its field f (as type t) is non-zero. */
#define X_FIELD(ptr, t, f) (((ptr)->conf) && ((t *)((ptr)->conf))->f)

	const bool masked_push =
		X_FIELD(mask + MLX5_HW_VLAN_PUSH_TYPE_IDX,
			const struct rte_flow_action_of_push_vlan, ethertype);
	bool masked_param;

	/*
	 * Mandatory actions order:
	 * OF_PUSH_VLAN / OF_SET_VLAN_VID [ / OF_SET_VLAN_PCP ]
	 */
	RTE_SET_USED(dev);
	RTE_SET_USED(attr);
	/* Check that mark matches OF_PUSH_VLAN */
	if (mask[MLX5_HW_VLAN_PUSH_TYPE_IDX].type !=
	    RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action, "OF_PUSH_VLAN: mask does not match");
	/* Check that the second template and mask items are SET_VLAN_VID */
	if (action[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID ||
	    mask[MLX5_HW_VLAN_PUSH_VID_IDX].type !=
	    RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action, "OF_PUSH_VLAN: invalid actions order");
	masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_VID_IDX,
			       const struct rte_flow_action_of_set_vlan_vid,
			       vlan_vid);
	/*
	 * PMD requires OF_SET_VLAN_VID mask to must match OF_PUSH_VLAN
	 */
	if (masked_push ^ masked_param)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "OF_SET_VLAN_VID: mask does not match OF_PUSH_VLAN");
	/* The PCP action is optional; validate it only when present. */
	if (is_of_vlan_pcp_present(action)) {
		if (mask[MLX5_HW_VLAN_PUSH_PCP_IDX].type !=
		     RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action, "OF_SET_VLAN_PCP: missing mask configuration");
		masked_param = X_FIELD(mask + MLX5_HW_VLAN_PUSH_PCP_IDX,
				       const struct
				       rte_flow_action_of_set_vlan_pcp,
				       vlan_pcp);
		/*
		 * PMD requires OF_SET_VLAN_PCP mask to must match OF_PUSH_VLAN
		 */
		if (masked_push ^ masked_param)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION, action,
						  "OF_SET_VLAN_PCP: mask does not match OF_PUSH_VLAN");
	}
	return 0;
#undef X_FIELD
}
6967 : :
6968 : : static int
6969 : 0 : flow_hw_validate_action_default_miss(const struct rte_flow_actions_template_attr *attr,
6970 : : uint64_t action_flags,
6971 : : struct rte_flow_error *error)
6972 : : {
6973 : : /*
6974 : : * The private DEFAULT_MISS action is used internally for LACP in control
6975 : : * flows. So this validation can be ignored. It can be kept right now since
6976 : : * the validation will be done only once.
6977 : : */
6978 [ # # ]: 0 : if (!attr->ingress || attr->egress || attr->transfer)
6979 : 0 : return rte_flow_error_set(error, EINVAL,
6980 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6981 : : "DEFAULT MISS is only supported in ingress.");
6982 [ # # ]: 0 : if (action_flags & MLX5_FLOW_FATE_ACTIONS)
6983 : 0 : return rte_flow_error_set(error, EINVAL,
6984 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6985 : : "DEFAULT MISS should be the only termination.");
6986 : : return 0;
6987 : : }
6988 : :
6989 : : static int
6990 : 0 : flow_hw_validate_action_nat64(struct rte_eth_dev *dev, struct rte_flow_error *error)
6991 : : {
6992 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
6993 : :
6994 [ # # ]: 0 : if (!flow_hw_should_create_nat64_actions(priv))
6995 : 0 : return rte_flow_error_set(error, EOPNOTSUPP,
6996 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6997 : : "NAT64 action is not supported.");
6998 : :
6999 : : return 0;
7000 : : }
7001 : :
7002 : : static int
7003 : 0 : flow_hw_validate_action_jump(struct rte_eth_dev *dev,
7004 : : const struct rte_flow_actions_template_attr *attr,
7005 : : const struct rte_flow_action *action,
7006 : : const struct rte_flow_action *mask,
7007 : : struct rte_flow_error *error)
7008 : : {
7009 : 0 : const struct rte_flow_action_jump *m = mask->conf;
7010 : 0 : const struct rte_flow_action_jump *v = action->conf;
7011 : 0 : struct mlx5_flow_template_table_cfg cfg = {
7012 : : .external = true,
7013 : : .attr = {
7014 : : .flow_attr = {
7015 : 0 : .ingress = attr->ingress,
7016 : 0 : .egress = attr->egress,
7017 : 0 : .transfer = attr->transfer,
7018 : : },
7019 : : },
7020 : : };
7021 : 0 : uint32_t t_group = 0;
7022 : :
7023 [ # # # # ]: 0 : if (!m || !m->group)
7024 : : return 0;
7025 [ # # ]: 0 : if (!v)
7026 : 0 : return rte_flow_error_set(error, EINVAL,
7027 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
7028 : : "Invalid jump action configuration");
7029 [ # # ]: 0 : if (flow_hw_translate_group(dev, &cfg, v->group, &t_group, error))
7030 : 0 : return -rte_errno;
7031 [ # # ]: 0 : if (t_group == 0)
7032 : 0 : return rte_flow_error_set(error, EINVAL,
7033 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
7034 : : "Unsupported action - jump to root table");
7035 : : return 0;
7036 : : }
7037 : :
/*
 * Validate a JUMP_TO_TABLE_INDEX action with a masked table: the target
 * table must be non-root and must be a rule-array table (i.e. expose a
 * matcher jump action).
 */
static int
mlx5_flow_validate_action_jump_to_table_index(const struct rte_flow_action *action,
					      const struct rte_flow_action *mask,
					      struct rte_flow_error *error)
{
	const struct rte_flow_action_jump_to_table_index *m = mask->conf;
	const struct rte_flow_action_jump_to_table_index *v = action->conf;
	struct mlx5dr_action *jump_action;
	uint32_t t_group = 0;

	/* Unmasked table: nothing to validate at template time. */
	if (!m || !m->table)
		return 0;
	if (!v)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "Invalid jump to matcher action configuration");
	t_group = v->table->grp->group_id;
	if (t_group == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "Unsupported action - jump to root table");
	if (likely(!rte_flow_template_table_resizable(0, &v->table->cfg.attr))) {
		jump_action = v->table->matcher_info[0].jump;
	} else {
		uint32_t selector;
		/*
		 * Resizable tables may swap matchers concurrently; read the
		 * active selector under the replace read lock.
		 */
		rte_rwlock_read_lock(&v->table->matcher_replace_rwlk);
		selector = v->table->matcher_selector;
		jump_action = v->table->matcher_info[selector].jump;
		rte_rwlock_read_unlock(&v->table->matcher_replace_rwlk);
	}
	if (jump_action == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "Unsupported action - table is not an rule array");
	return 0;
}
7074 : :
7075 : : static int
7076 : 0 : mlx5_hw_validate_action_mark(struct rte_eth_dev *dev,
7077 : : const struct rte_flow_action *template_action,
7078 : : const struct rte_flow_action *template_mask,
7079 : : uint64_t action_flags,
7080 : : const struct rte_flow_actions_template_attr *template_attr,
7081 : : struct rte_flow_error *error)
7082 : : {
7083 : 0 : const struct rte_flow_action_mark *mark_mask = template_mask->conf;
7084 : : const struct rte_flow_action *action =
7085 [ # # # # ]: 0 : mark_mask && mark_mask->id ? template_action :
7086 : 0 : &(const struct rte_flow_action) {
7087 : : .type = RTE_FLOW_ACTION_TYPE_MARK,
7088 : 0 : .conf = &(const struct rte_flow_action_mark) {
7089 : : .id = MLX5_FLOW_MARK_MAX - 1
7090 : : }
7091 : : };
7092 : 0 : const struct rte_flow_attr attr = {
7093 : 0 : .ingress = template_attr->ingress,
7094 : 0 : .egress = template_attr->egress,
7095 : 0 : .transfer = template_attr->transfer
7096 : : };
7097 : :
7098 [ # # ]: 0 : if (template_attr->transfer &&
7099 [ # # ]: 0 : !MLX5_SH(dev)->cdev->config.hca_attr.fdb_rx_set_flow_tag_stc)
7100 : 0 : return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7101 : : action,
7102 : : "mark action not supported for transfer");
7103 : :
7104 : 0 : return mlx5_flow_validate_action_mark(dev, action, action_flags,
7105 : : &attr, error);
7106 : : }
7107 : :
7108 : : static int
7109 : 0 : mlx5_hw_validate_action_queue(struct rte_eth_dev *dev,
7110 : : const struct rte_flow_action *template_action,
7111 : : const struct rte_flow_action *template_mask,
7112 : : const struct rte_flow_actions_template_attr *template_attr,
7113 : : uint64_t action_flags,
7114 : : struct rte_flow_error *error)
7115 : : {
7116 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7117 : 0 : const struct rte_flow_action_queue *queue_mask = template_mask->conf;
7118 : 0 : const struct rte_flow_attr attr = {
7119 : 0 : .ingress = template_attr->ingress,
7120 : 0 : .egress = template_attr->egress,
7121 : 0 : .transfer = template_attr->transfer
7122 : : };
7123 [ # # # # ]: 0 : bool masked = queue_mask != NULL && queue_mask->index;
7124 : :
7125 [ # # # # : 0 : if (template_attr->egress || (template_attr->transfer && !priv->jump_fdb_rx_en))
# # ]
7126 : 0 : return rte_flow_error_set(error, EINVAL,
7127 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
7128 : : "QUEUE action supported for ingress only");
7129 [ # # ]: 0 : if (masked)
7130 : 0 : return mlx5_flow_validate_action_queue(template_action, action_flags, dev,
7131 : : &attr, error);
7132 : : else
7133 : : return 0;
7134 : : }
7135 : :
7136 : : static int
7137 : 0 : mlx5_hw_validate_action_rss(struct rte_eth_dev *dev,
7138 : : const struct rte_flow_action *template_action,
7139 : : const struct rte_flow_action *template_mask,
7140 : : const struct rte_flow_actions_template_attr *template_attr,
7141 : : __rte_unused uint64_t action_flags,
7142 : : struct rte_flow_error *error)
7143 : : {
7144 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7145 : 0 : const struct rte_flow_action_rss *mask = template_mask->conf;
7146 : :
7147 [ # # # # : 0 : if (template_attr->egress || (template_attr->transfer && !priv->jump_fdb_rx_en))
# # ]
7148 : 0 : return rte_flow_error_set(error, EINVAL,
7149 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
7150 : : "RSS action supported for ingress only");
7151 [ # # ]: 0 : if (mask != NULL)
7152 : 0 : return mlx5_validate_action_rss(dev, template_action, error);
7153 : : else
7154 : : return 0;
7155 : : }
7156 : :
7157 : : static int
7158 : 0 : mlx5_hw_validate_action_l2_encap(struct rte_eth_dev *dev,
7159 : : const struct rte_flow_action *template_action,
7160 : : const struct rte_flow_action *template_mask,
7161 : : const struct rte_flow_actions_template_attr *template_attr,
7162 : : uint64_t action_flags,
7163 : : struct rte_flow_error *error)
7164 : : {
7165 : 0 : const struct rte_flow_action_vxlan_encap default_action_conf = {
7166 : : .definition = (struct rte_flow_item *)
7167 : 0 : (struct rte_flow_item [1]) {
7168 : : [0] = { .type = RTE_FLOW_ITEM_TYPE_END }
7169 : : }
7170 : : };
7171 : 0 : const struct rte_flow_action *action = template_mask->conf ?
7172 [ # # ]: 0 : template_action : &(const struct rte_flow_action) {
7173 : 0 : .type = template_mask->type,
7174 : : .conf = &default_action_conf
7175 : : };
7176 : 0 : const struct rte_flow_attr attr = {
7177 : 0 : .ingress = template_attr->ingress,
7178 : 0 : .egress = template_attr->egress,
7179 : 0 : .transfer = template_attr->transfer
7180 : : };
7181 : :
7182 : 0 : return mlx5_flow_dv_validate_action_l2_encap(dev, action_flags, action,
7183 : : &attr, error);
7184 : : }
7185 : :
7186 : : static int
7187 : 0 : mlx5_hw_validate_action_l2_decap(struct rte_eth_dev *dev,
7188 : : const struct rte_flow_action *template_action,
7189 : : const struct rte_flow_action *template_mask,
7190 : : const struct rte_flow_actions_template_attr *template_attr,
7191 : : uint64_t action_flags,
7192 : : struct rte_flow_error *error)
7193 : : {
7194 : 0 : const struct rte_flow_action_vxlan_encap default_action_conf = {
7195 : : .definition = (struct rte_flow_item *)
7196 : 0 : (struct rte_flow_item [1]) {
7197 : : [0] = { .type = RTE_FLOW_ITEM_TYPE_END }
7198 : : }
7199 : : };
7200 : 0 : const struct rte_flow_action *action = template_mask->conf ?
7201 [ # # ]: 0 : template_action : &(const struct rte_flow_action) {
7202 : 0 : .type = template_mask->type,
7203 : : .conf = &default_action_conf
7204 : : };
7205 : 0 : const struct rte_flow_attr attr = {
7206 : 0 : .ingress = template_attr->ingress,
7207 : 0 : .egress = template_attr->egress,
7208 : 0 : .transfer = template_attr->transfer
7209 : : };
7210 : : uint64_t item_flags =
7211 : 0 : action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
7212 [ # # ]: 0 : MLX5_FLOW_LAYER_VXLAN : 0;
7213 : :
7214 : 0 : return mlx5_flow_dv_validate_action_decap(dev, action_flags, action,
7215 : : item_flags, &attr, error);
7216 : : }
7217 : :
7218 : : static int
7219 : : mlx5_hw_validate_action_conntrack(struct rte_eth_dev *dev,
7220 : : const struct rte_flow_action *template_action,
7221 : : const struct rte_flow_action *template_mask,
7222 : : const struct rte_flow_actions_template_attr *template_attr,
7223 : : uint64_t action_flags,
7224 : : struct rte_flow_error *error)
7225 : : {
7226 : : RTE_SET_USED(template_action);
7227 : : RTE_SET_USED(template_mask);
7228 : : RTE_SET_USED(template_attr);
7229 : 0 : return mlx5_flow_dv_validate_action_aso_ct(dev, action_flags,
7230 : : MLX5_FLOW_LAYER_OUTER_L4_TCP,
7231 : : false, error);
7232 : : }
7233 : :
7234 : : static int
7235 : 0 : flow_hw_validate_action_raw_encap(const struct rte_flow_action *action,
7236 : : const struct rte_flow_action *mask,
7237 : : struct rte_flow_error *error)
7238 : : {
7239 : 0 : const struct rte_flow_action_raw_encap *mask_conf = mask->conf;
7240 : 0 : const struct rte_flow_action_raw_encap *action_conf = action->conf;
7241 : :
7242 [ # # # # ]: 0 : if (!mask_conf || !mask_conf->size)
7243 : 0 : return rte_flow_error_set(error, EINVAL,
7244 : : RTE_FLOW_ERROR_TYPE_ACTION, mask,
7245 : : "raw_encap: size must be masked");
7246 [ # # # # ]: 0 : if (!action_conf || !action_conf->size)
7247 : 0 : return rte_flow_error_set(error, EINVAL,
7248 : : RTE_FLOW_ERROR_TYPE_ACTION, action,
7249 : : "raw_encap: invalid action configuration");
7250 [ # # # # ]: 0 : if (mask_conf->data && !action_conf->data)
7251 : 0 : return rte_flow_error_set(error, EINVAL,
7252 : : RTE_FLOW_ERROR_TYPE_ACTION,
7253 : : action, "raw_encap: masked data is missing");
7254 : : return 0;
7255 : : }
7256 : :
7257 : :
7258 : : static int
7259 : 0 : flow_hw_validate_action_raw_reformat(struct rte_eth_dev *dev,
7260 : : const struct rte_flow_action *template_action,
7261 : : const struct rte_flow_action *template_mask,
7262 : : const struct
7263 : : rte_flow_actions_template_attr *template_attr,
7264 : : uint64_t *action_flags,
7265 : : struct rte_flow_error *error)
7266 : : {
7267 : : const struct rte_flow_action *encap_action = NULL;
7268 : : const struct rte_flow_action *encap_mask = NULL;
7269 : : const struct rte_flow_action_raw_decap *raw_decap = NULL;
7270 : : const struct rte_flow_action_raw_encap *raw_encap = NULL;
7271 : 0 : const struct rte_flow_attr attr = {
7272 : 0 : .ingress = template_attr->ingress,
7273 : 0 : .egress = template_attr->egress,
7274 : 0 : .transfer = template_attr->transfer
7275 : : };
7276 : : uint64_t item_flags = 0;
7277 : 0 : int ret, actions_n = 0;
7278 : :
7279 [ # # ]: 0 : if (template_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
7280 : 0 : raw_decap = template_mask->conf ?
7281 [ # # ]: 0 : template_action->conf : &empty_decap;
7282 [ # # ]: 0 : if ((template_action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7283 [ # # ]: 0 : if ((template_mask + 1)->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
7284 : 0 : return rte_flow_error_set(error, EINVAL,
7285 : : RTE_FLOW_ERROR_TYPE_ACTION,
7286 : 0 : template_mask + 1, "invalid mask type");
7287 : 0 : encap_action = template_action + 1;
7288 : 0 : encap_mask = template_mask + 1;
7289 : : }
7290 : : } else {
7291 : : encap_action = template_action;
7292 : : encap_mask = template_mask;
7293 : : }
7294 [ # # ]: 0 : if (encap_action) {
7295 : 0 : raw_encap = encap_action->conf;
7296 : 0 : ret = flow_hw_validate_action_raw_encap(encap_action,
7297 : : encap_mask, error);
7298 [ # # ]: 0 : if (ret)
7299 : : return ret;
7300 : : }
7301 : 0 : return mlx5_flow_dv_validate_action_raw_encap_decap(dev, raw_decap,
7302 : : raw_encap, &attr,
7303 : : action_flags,
7304 : : &actions_n,
7305 : : template_action,
7306 : : item_flags, error);
7307 : : }
7308 : :
7309 : :
7310 : :
7311 : : static int
7312 : 0 : mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
7313 : : const struct rte_flow_actions_template_attr *attr,
7314 : : const struct rte_flow_action actions[],
7315 : : const struct rte_flow_action masks[],
7316 : : uint64_t *act_flags,
7317 : : struct rte_flow_error *error)
7318 : : {
7319 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
7320 : : const struct rte_flow_action_count *count_mask = NULL;
7321 : 0 : bool fixed_cnt = false;
7322 : 0 : uint64_t action_flags = 0;
7323 : : bool actions_end = false;
7324 : : uint16_t i;
7325 : : int ret;
7326 : : const struct rte_flow_action_ipv6_ext_remove *remove_data;
7327 : :
7328 [ # # ]: 0 : if (!mlx5_hw_ctx_validate(dev, error))
7329 : 0 : return -rte_errno;
7330 : : /* FDB actions are only valid to proxy port. */
7331 [ # # # # : 0 : if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master))
# # ]
7332 : 0 : return rte_flow_error_set(error, EINVAL,
7333 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7334 : : NULL,
7335 : : "transfer actions are only valid to proxy port");
7336 [ # # ]: 0 : for (i = 0; !actions_end; ++i) {
7337 : 0 : const struct rte_flow_action *action = &actions[i];
7338 : 0 : const struct rte_flow_action *mask = &masks[i];
7339 : :
7340 : : MLX5_ASSERT(i < MLX5_HW_MAX_ACTS);
7341 [ # # ]: 0 : if (action->type != RTE_FLOW_ACTION_TYPE_INDIRECT &&
7342 [ # # ]: 0 : action->type != mask->type)
7343 : 0 : return rte_flow_error_set(error, ENOTSUP,
7344 : : RTE_FLOW_ERROR_TYPE_ACTION,
7345 : : action,
7346 : : "mask type does not match action type");
7347 [ # # # # : 0 : switch ((int)action->type) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # ]
7348 : : case RTE_FLOW_ACTION_TYPE_VOID:
7349 : 0 : break;
7350 : : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
7351 : : break;
7352 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
7353 : 0 : ret = flow_hw_validate_action_indirect(dev, action,
7354 : : mask,
7355 : : &action_flags,
7356 : : &fixed_cnt,
7357 : : error);
7358 [ # # ]: 0 : if (ret < 0)
7359 : 0 : return ret;
7360 : : break;
7361 : 0 : case RTE_FLOW_ACTION_TYPE_FLAG:
7362 : : /* TODO: Validation logic */
7363 : 0 : action_flags |= MLX5_FLOW_ACTION_FLAG;
7364 : 0 : break;
7365 : 0 : case RTE_FLOW_ACTION_TYPE_MARK:
7366 : 0 : ret = mlx5_hw_validate_action_mark(dev, action, mask,
7367 : : action_flags,
7368 : : attr, error);
7369 [ # # ]: 0 : if (ret)
7370 : 0 : return ret;
7371 : 0 : action_flags |= MLX5_FLOW_ACTION_MARK;
7372 : 0 : break;
7373 : 0 : case RTE_FLOW_ACTION_TYPE_DROP:
7374 : 0 : ret = mlx5_flow_validate_action_drop
7375 : : (dev, action_flags,
7376 : 0 : &(struct rte_flow_attr){.egress = attr->egress},
7377 : : error);
7378 [ # # ]: 0 : if (ret)
7379 : 0 : return ret;
7380 : 0 : action_flags |= MLX5_FLOW_ACTION_DROP;
7381 : 0 : break;
7382 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
7383 : : /* Only validate the jump to root table in template stage. */
7384 : 0 : ret = flow_hw_validate_action_jump(dev, attr, action, mask, error);
7385 [ # # ]: 0 : if (ret)
7386 : 0 : return ret;
7387 : 0 : action_flags |= MLX5_FLOW_ACTION_JUMP;
7388 : 0 : break;
7389 : : #ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
7390 : : case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
7391 : : if (priv->shared_host)
7392 : : return rte_flow_error_set(error, ENOTSUP,
7393 : : RTE_FLOW_ERROR_TYPE_ACTION,
7394 : : action,
7395 : : "action not supported in guest port");
7396 : : action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
7397 : : break;
7398 : : #endif
7399 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
7400 : 0 : ret = mlx5_hw_validate_action_queue(dev, action, mask,
7401 : : attr, action_flags,
7402 : : error);
7403 [ # # ]: 0 : if (ret)
7404 : 0 : return ret;
7405 : 0 : action_flags |= MLX5_FLOW_ACTION_QUEUE;
7406 : 0 : break;
7407 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
7408 : 0 : ret = mlx5_hw_validate_action_rss(dev, action, mask,
7409 : : attr, action_flags,
7410 : : error);
7411 [ # # ]: 0 : if (ret)
7412 : 0 : return ret;
7413 : 0 : action_flags |= MLX5_FLOW_ACTION_RSS;
7414 : 0 : break;
7415 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7416 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7417 : 0 : ret = mlx5_hw_validate_action_l2_encap(dev, action, mask,
7418 : : attr, action_flags,
7419 : : error);
7420 [ # # ]: 0 : if (ret)
7421 : 0 : return ret;
7422 : 0 : action_flags |= MLX5_FLOW_ACTION_ENCAP;
7423 : 0 : break;
7424 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7425 : : case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7426 : 0 : ret = mlx5_hw_validate_action_l2_decap(dev, action, mask,
7427 : : attr, action_flags,
7428 : : error);
7429 [ # # ]: 0 : if (ret)
7430 : 0 : return ret;
7431 : 0 : action_flags |= MLX5_FLOW_ACTION_DECAP;
7432 : 0 : break;
7433 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7434 : : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7435 : 0 : ret = flow_hw_validate_action_raw_reformat(dev, action,
7436 : : mask, attr,
7437 : : &action_flags,
7438 : : error);
7439 [ # # ]: 0 : if (ret)
7440 : 0 : return ret;
7441 [ # # ]: 0 : if (action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
7442 [ # # ]: 0 : (action + 1)->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7443 : 0 : action_flags |= MLX5_FLOW_XCAP_ACTIONS;
7444 : 0 : i++;
7445 : : }
7446 : : break;
7447 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
7448 : 0 : ret = flow_hw_validate_action_ipv6_ext_push(dev, action, error);
7449 [ # # ]: 0 : if (ret < 0)
7450 : 0 : return ret;
7451 : 0 : action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
7452 : 0 : break;
7453 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
7454 : 0 : remove_data = action->conf;
7455 : : /* Remove action must be shared. */
7456 [ # # # # ]: 0 : if (remove_data->type != IPPROTO_ROUTING || !mask) {
7457 : 0 : DRV_LOG(ERR, "Only supports shared IPv6 routing remove");
7458 : 0 : return -EINVAL;
7459 : : }
7460 : 0 : action_flags |= MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE;
7461 : 0 : break;
7462 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
7463 : 0 : ret = flow_hw_validate_action_meter_mark(dev, action, false, error);
7464 [ # # ]: 0 : if (ret < 0)
7465 : 0 : return ret;
7466 : 0 : action_flags |= MLX5_FLOW_ACTION_METER;
7467 : 0 : break;
7468 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7469 : 0 : ret = flow_hw_validate_action_modify_field(dev, action, mask,
7470 : : error);
7471 [ # # ]: 0 : if (ret < 0)
7472 : 0 : return ret;
7473 : 0 : action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
7474 : 0 : break;
7475 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
7476 : 0 : ret = flow_hw_validate_action_represented_port
7477 : : (dev, action, mask, error);
7478 [ # # ]: 0 : if (ret < 0)
7479 : 0 : return ret;
7480 : 0 : action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7481 : 0 : break;
7482 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
7483 : 0 : ret = flow_hw_validate_action_port_representor
7484 : : (dev, attr, action, mask, error);
7485 [ # # ]: 0 : if (ret < 0)
7486 : 0 : return ret;
7487 : 0 : action_flags |= MLX5_FLOW_ACTION_PORT_REPRESENTOR;
7488 : 0 : break;
7489 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
7490 [ # # # # ]: 0 : if (count_mask && count_mask->id)
7491 : 0 : fixed_cnt = true;
7492 : 0 : ret = flow_hw_validate_action_age(dev, action,
7493 : : action_flags,
7494 : : fixed_cnt, error);
7495 [ # # ]: 0 : if (ret < 0)
7496 : 0 : return ret;
7497 : 0 : action_flags |= MLX5_FLOW_ACTION_AGE;
7498 : 0 : break;
7499 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
7500 : 0 : ret = flow_hw_validate_action_count(dev, action, mask,
7501 : : action_flags,
7502 : : error);
7503 [ # # ]: 0 : if (ret < 0)
7504 : 0 : return ret;
7505 : 0 : count_mask = mask->conf;
7506 : 0 : action_flags |= MLX5_FLOW_ACTION_COUNT;
7507 : 0 : break;
7508 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7509 : 0 : ret = mlx5_hw_validate_action_conntrack(dev, action, mask,
7510 : : attr, action_flags,
7511 : : error);
7512 [ # # ]: 0 : if (ret)
7513 : 0 : return ret;
7514 : 0 : action_flags |= MLX5_FLOW_ACTION_CT;
7515 : 0 : break;
7516 : 0 : case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7517 : 0 : action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7518 : 0 : break;
7519 : 0 : case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7520 : 0 : action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7521 : 0 : break;
7522 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7523 : 0 : ret = flow_hw_validate_action_push_vlan
7524 : : (dev, attr, action, mask, error);
7525 [ # # ]: 0 : if (ret != 0)
7526 : 0 : return ret;
7527 : 0 : i += is_of_vlan_pcp_present(action) ?
7528 [ # # ]: 0 : MLX5_HW_VLAN_PUSH_PCP_IDX :
7529 : : MLX5_HW_VLAN_PUSH_VID_IDX;
7530 : 0 : action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7531 : 0 : break;
7532 : 0 : case RTE_FLOW_ACTION_TYPE_NAT64:
7533 : 0 : ret = flow_hw_validate_action_nat64(dev, error);
7534 [ # # ]: 0 : if (ret != 0)
7535 : 0 : return ret;
7536 : 0 : action_flags |= MLX5_FLOW_ACTION_NAT64;
7537 : 0 : break;
7538 : 0 : case RTE_FLOW_ACTION_TYPE_END:
7539 : : actions_end = true;
7540 : 0 : break;
7541 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7542 : 0 : ret = flow_hw_validate_action_default_miss(attr, action_flags, error);
7543 [ # # ]: 0 : if (ret < 0)
7544 : 0 : return ret;
7545 : 0 : action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
7546 : 0 : break;
7547 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
7548 : 0 : ret = mlx5_flow_validate_action_jump_to_table_index(action, mask, error);
7549 [ # # ]: 0 : if (ret < 0)
7550 : 0 : return ret;
7551 : 0 : action_flags |= MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX;
7552 : 0 : break;
7553 : 0 : default:
7554 : 0 : return rte_flow_error_set(error, ENOTSUP,
7555 : : RTE_FLOW_ERROR_TYPE_ACTION,
7556 : : action,
7557 : : "action not supported in template API");
7558 : : }
7559 : : }
7560 [ # # ]: 0 : if (act_flags != NULL)
7561 : 0 : *act_flags = action_flags;
7562 : : return 0;
7563 : : }
7564 : :
7565 : : static int
7566 : 0 : flow_hw_actions_validate(struct rte_eth_dev *dev,
7567 : : const struct rte_flow_actions_template_attr *attr,
7568 : : const struct rte_flow_action actions[],
7569 : : const struct rte_flow_action masks[],
7570 : : struct rte_flow_error *error)
7571 : : {
7572 : 0 : return mlx5_flow_hw_actions_validate(dev, attr, actions, masks, NULL, error);
7573 : : }
7574 : :
7575 : :
7576 : : static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
7577 : : [RTE_FLOW_ACTION_TYPE_MARK] = MLX5DR_ACTION_TYP_TAG,
7578 : : [RTE_FLOW_ACTION_TYPE_FLAG] = MLX5DR_ACTION_TYP_TAG,
7579 : : [RTE_FLOW_ACTION_TYPE_DROP] = MLX5DR_ACTION_TYP_DROP,
7580 : : [RTE_FLOW_ACTION_TYPE_JUMP] = MLX5DR_ACTION_TYP_TBL,
7581 : : [RTE_FLOW_ACTION_TYPE_QUEUE] = MLX5DR_ACTION_TYP_TIR,
7582 : : [RTE_FLOW_ACTION_TYPE_RSS] = MLX5DR_ACTION_TYP_TIR,
7583 : : [RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
7584 : : [RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2,
7585 : : [RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
7586 : : [RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2,
7587 : : [RTE_FLOW_ACTION_TYPE_MODIFY_FIELD] = MLX5DR_ACTION_TYP_MODIFY_HDR,
7588 : : [RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = MLX5DR_ACTION_TYP_VPORT,
7589 : : [RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = MLX5DR_ACTION_TYP_MISS,
7590 : : [RTE_FLOW_ACTION_TYPE_CONNTRACK] = MLX5DR_ACTION_TYP_ASO_CT,
7591 : : [RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = MLX5DR_ACTION_TYP_POP_VLAN,
7592 : : [RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = MLX5DR_ACTION_TYP_PUSH_VLAN,
7593 : : [RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
7594 : : [RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH] = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT,
7595 : : [RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE] = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT,
7596 : : [RTE_FLOW_ACTION_TYPE_NAT64] = MLX5DR_ACTION_TYP_NAT64,
7597 : : [RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX] = MLX5DR_ACTION_TYP_JUMP_TO_MATCHER,
7598 : : };
7599 : :
7600 : : static inline void
7601 : : action_template_set_type(struct rte_flow_actions_template *at,
7602 : : enum mlx5dr_action_type *action_types,
7603 : : unsigned int action_src, uint16_t *curr_off,
7604 : : enum mlx5dr_action_type type)
7605 : : {
7606 : 0 : at->dr_off[action_src] = *curr_off;
7607 : 0 : action_types[*curr_off] = type;
7608 : 0 : *curr_off = *curr_off + 1;
7609 : 0 : }
7610 : :
7611 : : static int
7612 : 0 : flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
7613 : : enum mlx5dr_action_type *action_types,
7614 : : uint16_t *curr_off, uint16_t *cnt_off,
7615 : : struct rte_flow_actions_template *at)
7616 : : {
7617 [ # # # # : 0 : switch (type) {
# ]
7618 : : case RTE_FLOW_ACTION_TYPE_RSS:
7619 : : action_template_set_type(at, action_types, action_src, curr_off,
7620 : : MLX5DR_ACTION_TYP_TIR);
7621 : : break;
7622 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
7623 : : case RTE_FLOW_ACTION_TYPE_COUNT:
7624 : : /*
7625 : : * Both AGE and COUNT action need counter, the first one fills
7626 : : * the action_types array, and the second only saves the offset.
7627 : : */
7628 [ # # ]: 0 : if (*cnt_off == UINT16_MAX) {
7629 : 0 : *cnt_off = *curr_off;
7630 : : action_template_set_type(at, action_types,
7631 : : action_src, curr_off,
7632 : : MLX5DR_ACTION_TYP_CTR);
7633 : : }
7634 : 0 : at->dr_off[action_src] = *cnt_off;
7635 : 0 : break;
7636 : : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
7637 : : action_template_set_type(at, action_types, action_src, curr_off,
7638 : : MLX5DR_ACTION_TYP_ASO_CT);
7639 : : break;
7640 : : case RTE_FLOW_ACTION_TYPE_QUOTA:
7641 : : case RTE_FLOW_ACTION_TYPE_METER_MARK:
7642 : : action_template_set_type(at, action_types, action_src, curr_off,
7643 : : MLX5DR_ACTION_TYP_ASO_METER);
7644 : : break;
7645 : 0 : default:
7646 : 0 : DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
7647 : 0 : return -EINVAL;
7648 : : }
7649 : : return 0;
7650 : : }
7651 : :
7652 : :
7653 : : static int
7654 : 0 : flow_hw_template_actions_list(struct rte_flow_actions_template *at,
7655 : : unsigned int action_src,
7656 : : enum mlx5dr_action_type *action_types,
7657 : : uint16_t *curr_off, uint16_t *cnt_off)
7658 : : {
7659 : : int ret;
7660 : 0 : const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
7661 [ # # # # ]: 0 : enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
7662 : : const union {
7663 : : struct mlx5_indlst_legacy *legacy;
7664 : : struct rte_flow_action_list_handle *handle;
7665 : : } indlst_obj = { .handle = indlst_conf->handle };
7666 : : enum mlx5dr_action_type type;
7667 : :
7668 [ # # # # ]: 0 : switch (list_type) {
7669 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
7670 : 0 : ret = flow_hw_dr_actions_template_handle_shared
7671 : 0 : (indlst_obj.legacy->legacy_type, action_src,
7672 : : action_types, curr_off, cnt_off, at);
7673 [ # # ]: 0 : if (ret)
7674 : 0 : return ret;
7675 : : break;
7676 : : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
7677 : : action_template_set_type(at, action_types, action_src, curr_off,
7678 : : MLX5DR_ACTION_TYP_DEST_ARRAY);
7679 : : break;
7680 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
7681 : 0 : type = ((struct mlx5_hw_encap_decap_action *)
7682 : : (indlst_conf->handle))->action_type;
7683 : : action_template_set_type(at, action_types, action_src, curr_off, type);
7684 : : break;
7685 : 0 : default:
7686 : 0 : DRV_LOG(ERR, "Unsupported indirect list type");
7687 : 0 : return -EINVAL;
7688 : : }
7689 : : return 0;
7690 : : }
7691 : :
7692 : : /**
7693 : : * Create DR action template based on a provided sequence of flow actions.
7694 : : *
7695 : : * @param[in] dev
7696 : : * Pointer to the rte_eth_dev structure.
7697 : : * @param[in] at
7698 : : * Pointer to flow actions template to be updated.
7699 : : * @param[out] action_types
7700 : : * Action types array to be filled.
7701 : : * @param[out] tmpl_flags
7702 : : * Template DR flags to be filled.
7703 : : *
7704 : : * @return
7705 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
7706 : : */
7707 : : static int
7708 : 0 : flow_hw_parse_flow_actions_to_dr_actions(struct rte_eth_dev *dev,
7709 : : struct rte_flow_actions_template *at,
7710 : : enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS],
7711 : : uint32_t *tmpl_flags __rte_unused)
7712 : : {
7713 : : unsigned int i;
7714 : : uint16_t curr_off;
7715 : : enum mlx5dr_action_type reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
7716 : : uint16_t reformat_off = UINT16_MAX;
7717 : : uint16_t mhdr_off = UINT16_MAX;
7718 : : uint16_t recom_off = UINT16_MAX;
7719 : 0 : uint16_t cnt_off = UINT16_MAX;
7720 : : enum mlx5dr_action_type recom_type = MLX5DR_ACTION_TYP_LAST;
7721 : : int ret;
7722 : :
7723 [ # # ]: 0 : for (i = 0, curr_off = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
7724 : : const struct rte_flow_action_raw_encap *raw_encap_data;
7725 : : size_t data_size;
7726 : : enum mlx5dr_action_type type;
7727 : :
7728 [ # # ]: 0 : if (curr_off >= MLX5_HW_MAX_ACTS)
7729 : 0 : goto err_actions_num;
7730 [ # # # # : 0 : switch ((int)at->actions[i].type) {
# # # # #
# # # # #
# # # ]
7731 : : case RTE_FLOW_ACTION_TYPE_VOID:
7732 : : break;
7733 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
7734 : 0 : ret = flow_hw_template_actions_list(at, i, action_types,
7735 : : &curr_off, &cnt_off);
7736 [ # # ]: 0 : if (ret)
7737 : 0 : return ret;
7738 : : break;
7739 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
7740 : 0 : ret = flow_hw_dr_actions_template_handle_shared
7741 : 0 : (at->masks[i].type, i, action_types,
7742 : : &curr_off, &cnt_off, at);
7743 [ # # ]: 0 : if (ret)
7744 : 0 : return ret;
7745 : : break;
7746 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7747 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7748 : : case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7749 : : case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7750 : : MLX5_ASSERT(reformat_off == UINT16_MAX);
7751 : 0 : reformat_off = curr_off++;
7752 : 0 : reformat_act_type = mlx5_hw_dr_action_types[at->actions[i].type];
7753 : 0 : break;
7754 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_PUSH:
7755 : : MLX5_ASSERT(recom_off == UINT16_MAX);
7756 : : recom_type = MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT;
7757 : 0 : recom_off = curr_off++;
7758 : 0 : break;
7759 : 0 : case RTE_FLOW_ACTION_TYPE_IPV6_EXT_REMOVE:
7760 : : MLX5_ASSERT(recom_off == UINT16_MAX);
7761 : : recom_type = MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT;
7762 : 0 : recom_off = curr_off++;
7763 : 0 : break;
7764 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7765 : 0 : raw_encap_data = at->actions[i].conf;
7766 : 0 : data_size = raw_encap_data->size;
7767 [ # # ]: 0 : if (reformat_off != UINT16_MAX) {
7768 : : reformat_act_type = data_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
7769 [ # # ]: 0 : MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2 :
7770 : : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
7771 : : } else {
7772 : 0 : reformat_off = curr_off++;
7773 : : reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
7774 : : }
7775 : : break;
7776 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7777 : 0 : reformat_off = curr_off++;
7778 : : reformat_act_type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
7779 : 0 : break;
7780 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
7781 [ # # ]: 0 : if (mhdr_off == UINT16_MAX) {
7782 : 0 : mhdr_off = curr_off++;
7783 : 0 : type = mlx5_hw_dr_action_types[at->actions[i].type];
7784 : 0 : action_types[mhdr_off] = type;
7785 : : }
7786 : : break;
7787 : 0 : case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7788 : 0 : type = mlx5_hw_dr_action_types[at->actions[i].type];
7789 : 0 : at->dr_off[i] = curr_off;
7790 : 0 : action_types[curr_off++] = type;
7791 : 0 : i += is_of_vlan_pcp_present(at->actions + i) ?
7792 [ # # ]: 0 : MLX5_HW_VLAN_PUSH_PCP_IDX :
7793 : : MLX5_HW_VLAN_PUSH_VID_IDX;
7794 : 0 : break;
7795 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
7796 : 0 : at->dr_off[i] = curr_off;
7797 : 0 : action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
7798 [ # # ]: 0 : if (curr_off >= MLX5_HW_MAX_ACTS)
7799 : 0 : goto err_actions_num;
7800 : : break;
7801 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
7802 : : case RTE_FLOW_ACTION_TYPE_COUNT:
7803 : : /*
7804 : : * Both AGE and COUNT action need counter, the first
7805 : : * one fills the action_types array, and the second only
7806 : : * saves the offset.
7807 : : */
7808 [ # # ]: 0 : if (cnt_off == UINT16_MAX) {
7809 : 0 : cnt_off = curr_off++;
7810 : 0 : action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
7811 : : }
7812 : 0 : at->dr_off[i] = cnt_off;
7813 : 0 : break;
7814 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
7815 : 0 : at->dr_off[i] = curr_off;
7816 : 0 : action_types[curr_off++] = MLX5DR_ACTION_TYP_MISS;
7817 : 0 : break;
7818 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
7819 : 0 : *tmpl_flags |= MLX5DR_ACTION_TEMPLATE_FLAG_RELAXED_ORDER;
7820 : 0 : at->dr_off[i] = curr_off;
7821 : 0 : action_types[curr_off++] = MLX5DR_ACTION_TYP_JUMP_TO_MATCHER;
7822 : 0 : break;
7823 : 0 : case MLX5_RTE_FLOW_ACTION_TYPE_MIRROR:
7824 : 0 : at->dr_off[i] = curr_off;
7825 : 0 : action_types[curr_off++] = MLX5DR_ACTION_TYP_DEST_ARRAY;
7826 : 0 : break;
7827 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_ID:
7828 : 0 : DRV_LOG(ERR, "RTE_FLOW_ACTION_TYPE_PORT_ID action is not supported. "
7829 : : "Use RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT instead.");
7830 : 0 : return -EINVAL;
7831 : 0 : default:
7832 : 0 : type = mlx5_hw_dr_action_types[at->actions[i].type];
7833 : 0 : at->dr_off[i] = curr_off;
7834 : 0 : action_types[curr_off++] = type;
7835 : 0 : break;
7836 : : }
7837 : : }
7838 [ # # ]: 0 : if (curr_off >= MLX5_HW_MAX_ACTS)
7839 : 0 : goto err_actions_num;
7840 [ # # ]: 0 : if (mhdr_off != UINT16_MAX)
7841 : 0 : at->mhdr_off = mhdr_off;
7842 [ # # ]: 0 : if (reformat_off != UINT16_MAX) {
7843 : 0 : at->reformat_off = reformat_off;
7844 : 0 : action_types[reformat_off] = reformat_act_type;
7845 : : }
7846 [ # # ]: 0 : if (recom_off != UINT16_MAX) {
7847 : 0 : at->recom_off = recom_off;
7848 : 0 : action_types[recom_off] = recom_type;
7849 : : }
7850 : 0 : at->dr_actions_num = curr_off;
7851 : :
7852 : : /* Create srh flex parser for remove anchor. */
7853 [ # # ]: 0 : if ((recom_type == MLX5DR_ACTION_TYP_POP_IPV6_ROUTE_EXT ||
7854 [ # # ]: 0 : recom_type == MLX5DR_ACTION_TYP_PUSH_IPV6_ROUTE_EXT) &&
7855 : 0 : (ret = mlx5_alloc_srh_flex_parser(dev))) {
7856 : 0 : DRV_LOG(ERR, "Failed to create srv6 flex parser");
7857 : 0 : return ret;
7858 : : }
7859 : : return 0;
7860 : 0 : err_actions_num:
7861 : 0 : DRV_LOG(ERR, "Number of HW actions (%u) exceeded maximum (%u) allowed in template",
7862 : : curr_off, MLX5_HW_MAX_ACTS);
7863 : 0 : return -EINVAL;
7864 : : }
7865 : :
7866 : : static int
7867 : 0 : flow_hw_set_vlan_vid(struct rte_eth_dev *dev,
7868 : : struct rte_flow_action *ra,
7869 : : struct rte_flow_action *rm,
7870 : : struct rte_flow_action_modify_field *spec,
7871 : : struct rte_flow_action_modify_field *mask,
7872 : : int set_vlan_vid_ix,
7873 : : struct rte_flow_error *error)
7874 : : {
7875 [ # # ]: 0 : const bool masked = rm[set_vlan_vid_ix].conf &&
7876 : : (((const struct rte_flow_action_of_set_vlan_vid *)
7877 [ # # ]: 0 : rm[set_vlan_vid_ix].conf)->vlan_vid != 0);
7878 : 0 : const struct rte_flow_action_of_set_vlan_vid *conf =
7879 : 0 : ra[set_vlan_vid_ix].conf;
7880 : 0 : int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
7881 : : NULL, error);
7882 : : MLX5_ASSERT(width);
7883 : 0 : *spec = (typeof(*spec)) {
7884 : : .operation = RTE_FLOW_MODIFY_SET,
7885 : : .dst = {
7886 : : .field = RTE_FLOW_FIELD_VLAN_ID,
7887 : : .level = 0, .offset = 0,
7888 : : },
7889 : : .src = {
7890 : : .field = RTE_FLOW_FIELD_VALUE,
7891 : : },
7892 : : .width = width,
7893 : : };
7894 : 0 : *mask = (typeof(*mask)) {
7895 : : .operation = RTE_FLOW_MODIFY_SET,
7896 : : .dst = {
7897 : : .field = RTE_FLOW_FIELD_VLAN_ID,
7898 : : .level = 0xff, .offset = 0xffffffff,
7899 : : },
7900 : : .src = {
7901 : : .field = RTE_FLOW_FIELD_VALUE,
7902 : : },
7903 : : .width = 0xffffffff,
7904 : : };
7905 [ # # ]: 0 : if (masked) {
7906 : 0 : uint32_t mask_val = 0xffffffff;
7907 : :
7908 [ # # ]: 0 : rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid));
7909 [ # # ]: 0 : rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val));
7910 : : }
7911 : 0 : ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
7912 : 0 : ra[set_vlan_vid_ix].conf = spec;
7913 : 0 : rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
7914 : 0 : rm[set_vlan_vid_ix].conf = mask;
7915 : 0 : return 0;
7916 : : }
7917 : :
7918 : : static __rte_always_inline int
7919 : : flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
7920 : : struct mlx5_modification_cmd *mhdr_cmd,
7921 : : struct mlx5_action_construct_data *act_data,
7922 : : const struct mlx5_hw_actions *hw_acts,
7923 : : const struct rte_flow_action *action)
7924 : : {
7925 : : struct rte_flow_error error;
7926 : 0 : rte_be16_t vid = ((const struct rte_flow_action_of_set_vlan_vid *)
7927 : 0 : action->conf)->vlan_vid;
7928 : 0 : int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0,
7929 : : NULL, &error);
7930 [ # # # # : 0 : struct rte_flow_action_modify_field conf = {
# # # # #
# ]
7931 : : .operation = RTE_FLOW_MODIFY_SET,
7932 : : .dst = {
7933 : : .field = RTE_FLOW_FIELD_VLAN_ID,
7934 : : .level = 0, .offset = 0,
7935 : : },
7936 : : .src = {
7937 : : .field = RTE_FLOW_FIELD_VALUE,
7938 : : },
7939 : : .width = width,
7940 : : };
7941 : : struct rte_flow_action modify_action = {
7942 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
7943 : : .conf = &conf
7944 : : };
7945 : :
7946 : : rte_memcpy(conf.src.value, &vid, sizeof(vid));
7947 : : return flow_hw_modify_field_construct(mhdr_cmd, act_data, hw_acts, &modify_action);
7948 : : }
7949 : :
7950 : : static int
7951 : 0 : flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
7952 : : struct rte_flow_item_flex_handle *handle,
7953 : : uint8_t *flex_item)
7954 : : {
7955 : 0 : int index = mlx5_flex_acquire_index(dev, handle, false);
7956 : :
7957 : : MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
7958 [ # # ]: 0 : if (index < 0)
7959 : : return -1;
7960 [ # # ]: 0 : if (!(*flex_item & RTE_BIT32(index))) {
7961 : : /* Don't count same flex item again. */
7962 : 0 : if (mlx5_flex_acquire_index(dev, handle, true) != index)
7963 : : MLX5_ASSERT(false);
7964 : 0 : *flex_item |= (uint8_t)RTE_BIT32(index);
7965 : : }
7966 : : return 0;
7967 : : }
7968 : :
7969 : : static void
7970 : 0 : flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
7971 : : {
7972 [ # # ]: 0 : while (*flex_item) {
7973 : 0 : int index = rte_bsf32(*flex_item);
7974 : :
7975 : 0 : mlx5_flex_release_index(dev, index);
7976 : 0 : *flex_item &= ~(uint8_t)RTE_BIT32(index);
7977 : : }
7978 : 0 : }
7979 : : static __rte_always_inline void
7980 : : flow_hw_actions_template_replace_container(const
7981 : : struct rte_flow_action *actions,
7982 : : const
7983 : : struct rte_flow_action *masks,
7984 : : struct rte_flow_action *new_actions,
7985 : : struct rte_flow_action *new_masks,
7986 : : struct rte_flow_action **ra,
7987 : : struct rte_flow_action **rm,
7988 : : uint32_t act_num)
7989 : : {
7990 : 0 : memcpy(new_actions, actions, sizeof(actions[0]) * act_num);
7991 : : memcpy(new_masks, masks, sizeof(masks[0]) * act_num);
7992 : : *ra = (void *)(uintptr_t)new_actions;
7993 : : *rm = (void *)(uintptr_t)new_masks;
7994 : 0 : }
7995 : :
7996 : : /* Action template copies these actions in rte_flow_conv() */
7997 : :
/*
 * Prototype MODIFY_FIELD action copying Rx metadata between metadata
 * registers (REG_C_1 -> REG_B), appended to ingress action templates that
 * terminate in QUEUE/RSS when extended metadata is enabled.
 */
static const struct rte_flow_action rx_meta_copy_action = {
	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
	.conf = &(struct rte_flow_action_modify_field){
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)
				MLX5_RTE_FLOW_FIELD_META_REG,
			.tag_index = REG_B,
		},
		.src = {
			.field = (enum rte_flow_field_id)
				MLX5_RTE_FLOW_FIELD_META_REG,
			.tag_index = REG_C_1,
		},
		.width = 32,
	}
};

/*
 * Mask counterpart of rx_meta_copy_action: all relevant fields are fully
 * masked, marking the whole copy action as constant in the template.
 */
static const struct rte_flow_action rx_meta_copy_mask = {
	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
	.conf = &(struct rte_flow_action_modify_field){
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)
				MLX5_RTE_FLOW_FIELD_META_REG,
			.level = UINT8_MAX,
			.tag_index = UINT8_MAX,
			.offset = UINT32_MAX,
		},
		.src = {
			.field = (enum rte_flow_field_id)
				MLX5_RTE_FLOW_FIELD_META_REG,
			.level = UINT8_MAX,
			.tag_index = UINT8_MAX,
			.offset = UINT32_MAX,
		},
		.width = UINT32_MAX,
	}
};
8037 : :
/*
 * Prototype MODIFY_FIELD action adding an immediate value to the 2-bit
 * meter color field; appended to templates that use the QUOTA action.
 */
static const struct rte_flow_action quota_color_inc_action = {
	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
	.conf = &(struct rte_flow_action_modify_field) {
		.operation = RTE_FLOW_MODIFY_ADD,
		.dst = {
			.field = RTE_FLOW_FIELD_METER_COLOR,
			.level = 0, .offset = 0
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
			.level = 1,
			.offset = 0,
		},
		.width = 2
	}
};

/*
 * Mask counterpart of quota_color_inc_action; fully masked so the
 * increment is treated as a constant part of the template.
 */
static const struct rte_flow_action quota_color_inc_mask = {
	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
	.conf = &(struct rte_flow_action_modify_field) {
		.operation = RTE_FLOW_MODIFY_ADD,
		.dst = {
			.field = RTE_FLOW_FIELD_METER_COLOR,
			.level = UINT8_MAX,
			.tag_index = UINT8_MAX,
			.offset = UINT32_MAX,
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
			.level = 3,
			.offset = 0
		},
		.width = UINT32_MAX
	}
};
8073 : :
8074 : : /**
8075 : : * Create flow action template.
8076 : : *
8077 : : * @param[in] dev
8078 : : * Pointer to the rte_eth_dev structure.
8079 : : * @param[in] attr
8080 : : * Pointer to the action template attributes.
8081 : : * @param[in] actions
8082 : : * Associated actions (list terminated by the END action).
8083 : : * @param[in] masks
8084 : : * List of actions that marks which of the action's member is constant.
8085 : : * @param[in] nt_mode
8086 : : * Non template mode.
8087 : : * @param[out] error
8088 : : * Pointer to error structure.
8089 : : *
8090 : : * @return
8091 : : * Action template pointer on success, NULL otherwise and rte_errno is set.
8092 : : */
static struct rte_flow_actions_template *
__flow_hw_actions_template_create(struct rte_eth_dev *dev,
			const struct rte_flow_actions_template_attr *attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			bool nt_mode,
			struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int len, act_len, mask_len;
	int orig_act_len;
	unsigned int act_num;
	unsigned int i;
	struct rte_flow_actions_template *at = NULL;
	uint16_t pos;
	uint64_t action_flags = 0;
	/* Scratch copies of the user arrays, used only if expansion is needed. */
	struct rte_flow_action tmp_action[MLX5_HW_MAX_ACTS];
	struct rte_flow_action tmp_mask[MLX5_HW_MAX_ACTS];
	/* Working pointers; initially alias the caller's read-only arrays. */
	struct rte_flow_action *ra = (void *)(uintptr_t)actions;
	struct rte_flow_action *rm = (void *)(uintptr_t)masks;
	int set_vlan_vid_ix = -1;
	struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
	struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
	/* Extra modify-field actions appended by template expansion. */
	struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
	struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
	uint32_t expand_mf_num = 0;
	uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
	enum mlx5dr_action_type action_types[MLX5_HW_MAX_ACTS] = { MLX5DR_ACTION_TYP_LAST };
	uint32_t tmpl_flags = 0;
	int ret;

	/* In non-template (nt) mode validation was already done by the caller. */
	if (!nt_mode && mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
						      &action_flags, error))
		return NULL;
	for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
		switch (ra[i].type) {
		/* OF_PUSH_VLAN *MUST* come before OF_SET_VLAN_VID */
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			/* PUSH_VLAN occupies 2 or 3 slots (with optional PCP). */
			i += is_of_vlan_pcp_present(ra + i) ?
				MLX5_HW_VLAN_PUSH_PCP_IDX :
				MLX5_HW_VLAN_PUSH_VID_IDX;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			set_vlan_vid_ix = i;
			break;
		default:
			break;
		}
	}
	/*
	 * Count flow actions to allocate required space for storing DR offsets and to check
	 * if temporary buffer would not be overrun.
	 */
	act_num = i + 1;
	if (act_num >= MLX5_HW_MAX_ACTS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
		return NULL;
	}
	/* Rewrite OF_SET_VLAN_VID in place as a MODIFY_FIELD action. */
	if (set_vlan_vid_ix != -1) {
		/* If temporary action buffer was not used, copy template actions to it */
		if (ra == actions)
			flow_hw_actions_template_replace_container(actions,
								   masks,
								   tmp_action,
								   tmp_mask,
								   &ra, &rm,
								   act_num);
		ret = flow_hw_set_vlan_vid(dev, ra, rm,
					   &set_vlan_vid_spec, &set_vlan_vid_mask,
					   set_vlan_vid_ix, error);
		if (ret)
			goto error;
		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
	}
	/* QUOTA requires an implicit meter-color increment action. */
	if (action_flags & MLX5_FLOW_ACTION_QUOTA) {
		mf_actions[expand_mf_num] = quota_color_inc_action;
		mf_masks[expand_mf_num] = quota_color_inc_mask;
		expand_mf_num++;
	}
	/* Ingress QUEUE/RSS with extended metadata needs a REG_C_1->REG_B copy. */
	if (attr->ingress &&
	    (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
		if ((priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
		     priv->sh->config.dv_esw_en) ||
		    mlx5_vport_rx_metadata_passing_enabled(priv->sh)) {
			/* Insert META copy */
			mf_actions[expand_mf_num] = rx_meta_copy_action;
			mf_masks[expand_mf_num] = rx_meta_copy_mask;
			expand_mf_num++;
			MLX5_ASSERT(expand_mf_num <= MLX5_HW_MAX_ACTS);
		}
	}
	/* Splice the implicit modify-field actions into the working arrays. */
	if (expand_mf_num) {
		if (act_num + expand_mf_num > MLX5_HW_MAX_ACTS) {
			rte_flow_error_set(error, E2BIG,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   NULL, "cannot expand: too many actions");
			return NULL;
		}
		if (ra == actions)
			flow_hw_actions_template_replace_container(actions,
								   masks,
								   tmp_action,
								   tmp_mask,
								   &ra, &rm,
								   act_num);
		/* Application should make sure only one Q/RSS exist in one rule. */
		pos = flow_hw_template_expand_modify_field(ra, rm,
							   mf_actions,
							   mf_masks,
							   action_flags,
							   act_num,
							   expand_mf_num);
		if (pos == MLX5_HW_EXPAND_MH_FAILED) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL, "modify header expansion failed");
			return NULL;
		}
		act_num += expand_mf_num;
		/* Actions after the insertion point shift by expand_mf_num. */
		for (i = pos + expand_mf_num; i < act_num; i++)
			src_off[i] += expand_mf_num;
		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
	}
	/*
	 * Size all four trailing sections (actions, masks, DR offsets,
	 * original actions), each aligned to 16 bytes, for one allocation.
	 */
	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
	if (act_len <= 0)
		return NULL;
	len = RTE_ALIGN(act_len, 16);
	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, rm, error);
	if (mask_len <= 0)
		return NULL;
	len += RTE_ALIGN(mask_len, 16);
	len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
	len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
	orig_act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, error);
	if (orig_act_len <= 0)
		return NULL;
	len += RTE_ALIGN(orig_act_len, 16);
	at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!at) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate action template");
		return NULL;
	}
	/* Actions part is in the first part. */
	at->attr = *attr;
	at->actions = (struct rte_flow_action *)(at + 1);
	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->actions,
				len, ra, error);
	if (act_len <= 0)
		goto error;
	/* Masks part is in the second part. */
	at->masks = (struct rte_flow_action *)(((uint8_t *)at->actions) + act_len);
	mask_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->masks,
				 len - act_len, rm, error);
	if (mask_len <= 0)
		goto error;
	/* DR actions offsets in the third part. */
	at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
	at->src_off = RTE_PTR_ADD(at->dr_off,
				  RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
	memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
	/* Fourth part keeps a verbatim copy of the user's original actions. */
	at->orig_actions = RTE_PTR_ADD(at->src_off,
				       RTE_ALIGN(act_num * sizeof(*at->src_off), 16));
	orig_act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, at->orig_actions, orig_act_len,
				     actions, error);
	if (orig_act_len <= 0)
		goto error;
	at->actions_num = act_num;
	/* Mark all DR offsets unresolved until translation assigns them. */
	for (i = 0; i < at->actions_num; ++i)
		at->dr_off[i] = UINT16_MAX;
	at->reformat_off = UINT16_MAX;
	at->mhdr_off = UINT16_MAX;
	at->recom_off = UINT16_MAX;
	for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
	     actions++, masks++, i++) {
		const struct rte_flow_action_modify_field *info;

		switch (actions->type) {
		/*
		 * mlx5 PMD hacks indirect action index directly to the action conf.
		 * The rte_flow_conv() function copies the content from conf pointer.
		 * Need to restore the indirect action index from action conf here.
		 */
		case RTE_FLOW_ACTION_TYPE_INDIRECT:
			at->actions[i].conf = ra[i].conf;
			at->masks[i].conf = rm[i].conf;
			break;
		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
			info = actions->conf;
			/*
			 * NOTE(review): on this error path flex items already
			 * acquired into at->flex_item are not released before
			 * freeing `at` — looks like a refcount leak; confirm.
			 */
			if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
			     flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
						       &at->flex_item)) ||
			    (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
			     flow_hw_flex_item_acquire(dev, info->src.flex_handle,
						       &at->flex_item)))
				goto error;
			break;
		default:
			break;
		}
	}
	ret = flow_hw_parse_flow_actions_to_dr_actions(dev, at, action_types, &tmpl_flags);
	if (ret)
		goto error;
	at->action_flags = action_flags;
	/* In non template mode there is no need to create the dr template. */
	if (nt_mode)
		return at;
	at->tmpl = mlx5dr_action_template_create(action_types, tmpl_flags);
	if (!at->tmpl) {
		DRV_LOG(ERR, "Failed to create DR action template: %d", rte_errno);
		goto error;
	}
	rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
	LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
	return at;
error:
	if (at) {
		mlx5_free(at);
	}
	rte_flow_error_set(error, rte_errno,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Failed to create action template");
	return NULL;
}
8322 : :
8323 : : /**
8324 : : * Create flow action template.
8325 : : *
8326 : : * @param[in] dev
8327 : : * Pointer to the rte_eth_dev structure.
8328 : : * @param[in] attr
8329 : : * Pointer to the action template attributes.
8330 : : * @param[in] actions
8331 : : * Associated actions (list terminated by the END action).
8332 : : * @param[in] masks
8333 : : * List of actions that marks which of the action's member is constant.
8334 : : * @param[out] error
8335 : : * Pointer to error structure.
8336 : : *
8337 : : * @return
8338 : : * Action template pointer on success, NULL otherwise and rte_errno is set.
8339 : : */
8340 : : static struct rte_flow_actions_template *
8341 : 0 : flow_hw_actions_template_create(struct rte_eth_dev *dev,
8342 : : const struct rte_flow_actions_template_attr *attr,
8343 : : const struct rte_flow_action actions[],
8344 : : const struct rte_flow_action masks[],
8345 : : struct rte_flow_error *error)
8346 : : {
8347 : 0 : return __flow_hw_actions_template_create(dev, attr, actions, masks, false, error);
8348 : : }
8349 : :
8350 : : /**
8351 : : * Destroy flow action template.
8352 : : *
8353 : : * @param[in] dev
8354 : : * Pointer to the rte_eth_dev structure.
8355 : : * @param[in] template
8356 : : * Pointer to the action template to be destroyed.
8357 : : * @param[out] error
8358 : : * Pointer to error structure.
8359 : : *
8360 : : * @return
8361 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
8362 : : */
8363 : : static int
8364 : 0 : flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
8365 : : struct rte_flow_actions_template *template,
8366 : : struct rte_flow_error *error __rte_unused)
8367 : : {
8368 : : uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
8369 : : MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
8370 : :
8371 [ # # ]: 0 : if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
8372 : 0 : DRV_LOG(WARNING, "Action template %p is still in use.",
8373 : : (void *)template);
8374 : 0 : return rte_flow_error_set(error, EBUSY,
8375 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8376 : : NULL,
8377 : : "action template is in use");
8378 : : }
8379 [ # # ]: 0 : if (template->action_flags & flag)
8380 : 0 : mlx5_free_srh_flex_parser(dev);
8381 [ # # ]: 0 : LIST_REMOVE(template, next);
8382 : 0 : flow_hw_flex_item_release(dev, &template->flex_item);
8383 [ # # ]: 0 : if (template->tmpl)
8384 : 0 : mlx5dr_action_template_destroy(template->tmpl);
8385 : 0 : mlx5_free(template);
8386 : 0 : return 0;
8387 : : }
8388 : :
8389 : : static struct rte_flow_item *
8390 : 0 : flow_hw_prepend_item(const struct rte_flow_item *items,
8391 : : const uint32_t nb_items,
8392 : : const struct rte_flow_item *new_item,
8393 : : struct rte_flow_error *error)
8394 : : {
8395 : : struct rte_flow_item *copied_items;
8396 : : size_t size;
8397 : :
8398 : : /* Allocate new array of items. */
8399 : 0 : size = sizeof(*copied_items) * (nb_items + 1);
8400 : 0 : copied_items = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
8401 [ # # ]: 0 : if (!copied_items) {
8402 : 0 : rte_flow_error_set(error, ENOMEM,
8403 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8404 : : NULL,
8405 : : "cannot allocate item template");
8406 : 0 : return NULL;
8407 : : }
8408 : : /* Put new item at the beginning and copy the rest. */
8409 : 0 : copied_items[0] = *new_item;
8410 [ # # ]: 0 : rte_memcpy(&copied_items[1], items, sizeof(*items) * nb_items);
8411 : : return copied_items;
8412 : : }
8413 : :
8414 : : static int
8415 : 0 : flow_hw_item_compare_field_validate(enum rte_flow_field_id arg_field,
8416 : : enum rte_flow_field_id base_field,
8417 : : struct rte_flow_error *error)
8418 : : {
8419 [ # # # ]: 0 : switch (arg_field) {
8420 : : case RTE_FLOW_FIELD_TAG:
8421 : : case RTE_FLOW_FIELD_META:
8422 : : case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8423 : : break;
8424 : 0 : case RTE_FLOW_FIELD_RANDOM:
8425 [ # # ]: 0 : if (base_field == RTE_FLOW_FIELD_VALUE)
8426 : : return 0;
8427 : 0 : return rte_flow_error_set(error, EINVAL,
8428 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8429 : : NULL,
8430 : : "compare random is supported only with immediate value");
8431 : 0 : default:
8432 : 0 : return rte_flow_error_set(error, ENOTSUP,
8433 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8434 : : NULL,
8435 : : "compare item argument field is not supported");
8436 : : }
8437 [ # # ]: 0 : switch (base_field) {
8438 : : case RTE_FLOW_FIELD_TAG:
8439 : : case RTE_FLOW_FIELD_META:
8440 : : case RTE_FLOW_FIELD_VALUE:
8441 : : case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8442 : : break;
8443 : 0 : default:
8444 : 0 : return rte_flow_error_set(error, ENOTSUP,
8445 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8446 : : NULL,
8447 : : "compare item base field is not supported");
8448 : : }
8449 : : return 0;
8450 : : }
8451 : :
8452 : : static inline uint32_t
8453 : : flow_hw_item_compare_width_supported(enum rte_flow_field_id field)
8454 : : {
8455 [ # # # ]: 0 : switch (field) {
8456 : : case RTE_FLOW_FIELD_TAG:
8457 : : case RTE_FLOW_FIELD_META:
8458 : : case RTE_FLOW_FIELD_ESP_SEQ_NUM:
8459 : : return 32;
8460 : 0 : case RTE_FLOW_FIELD_RANDOM:
8461 : 0 : return 16;
8462 : : default:
8463 : : break;
8464 : : }
8465 : 0 : return 0;
8466 : : }
8467 : :
8468 : : static int
8469 : 0 : flow_hw_validate_item_compare(const struct rte_flow_item *item,
8470 : : struct rte_flow_error *error)
8471 : : {
8472 : 0 : const struct rte_flow_item_compare *comp_m = item->mask;
8473 : 0 : const struct rte_flow_item_compare *comp_v = item->spec;
8474 : : int ret;
8475 : :
8476 [ # # ]: 0 : if (unlikely(!comp_m))
8477 : 0 : return rte_flow_error_set(error, EINVAL,
8478 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8479 : : NULL,
8480 : : "compare item mask is missing");
8481 [ # # ]: 0 : if (comp_m->width != UINT32_MAX)
8482 : 0 : return rte_flow_error_set(error, EINVAL,
8483 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8484 : : NULL,
8485 : : "compare item only support full mask");
8486 : 0 : ret = flow_hw_item_compare_field_validate(comp_m->a.field,
8487 : 0 : comp_m->b.field, error);
8488 [ # # ]: 0 : if (ret < 0)
8489 : : return ret;
8490 [ # # ]: 0 : if (comp_v) {
8491 : : uint32_t width;
8492 : :
8493 [ # # ]: 0 : if (comp_v->operation != comp_m->operation ||
8494 [ # # ]: 0 : comp_v->a.field != comp_m->a.field ||
8495 [ # # ]: 0 : comp_v->b.field != comp_m->b.field)
8496 : 0 : return rte_flow_error_set(error, EINVAL,
8497 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8498 : : NULL,
8499 : : "compare item spec/mask not matching");
8500 : : width = flow_hw_item_compare_width_supported(comp_v->a.field);
8501 : : MLX5_ASSERT(width > 0);
8502 [ # # ]: 0 : if ((comp_v->width & comp_m->width) != width)
8503 : 0 : return rte_flow_error_set(error, EINVAL,
8504 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8505 : : NULL,
8506 : : "compare item only support full mask");
8507 : : }
8508 : : return 0;
8509 : : }
8510 : :
/* Thin HWS wrapper delegating NSH item validation to the common checker. */
static inline int
mlx5_hw_validate_item_nsh(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  struct rte_flow_error *error)
{
	int ret = mlx5_flow_validate_item_nsh(dev, item, error);

	return ret;
}
8518 : :
8519 : : static inline uint8_t
8520 : 0 : mlx5_hw_flow_get_next_protocol(const struct rte_flow_item *item)
8521 : : {
8522 [ # # # # ]: 0 : if (!item || !item->spec)
8523 : : return 0xff;
8524 : :
8525 [ # # # # ]: 0 : switch (item->type) {
8526 : 0 : case RTE_FLOW_ITEM_TYPE_IPV4: {
8527 : : const struct rte_flow_item_ipv4 *spec = item->spec;
8528 : 0 : const struct rte_flow_item_ipv4 *mask = item->mask;
8529 : :
8530 : : /* If mask is NULL or next_proto_id field in mask is 0,
8531 : : * then next_protocol in spec should not be read
8532 : : */
8533 [ # # # # ]: 0 : if (!mask || mask->hdr.next_proto_id == 0)
8534 : : return 0xff;
8535 : :
8536 : 0 : return spec->hdr.next_proto_id & mask->hdr.next_proto_id;
8537 : : }
8538 : 0 : case RTE_FLOW_ITEM_TYPE_IPV6: {
8539 : : const struct rte_flow_item_ipv6 *spec = item->spec;
8540 : 0 : const struct rte_flow_item_ipv6 *mask = item->mask;
8541 : :
8542 : : /* If mask is NULL or proto field in mask is 0,
8543 : : * then proto in spec should not be read
8544 : : */
8545 [ # # # # ]: 0 : if (!mask || mask->hdr.proto == 0)
8546 : : return 0xff;
8547 : :
8548 : 0 : return spec->hdr.proto & mask->hdr.proto;
8549 : : }
8550 : 0 : case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: {
8551 : : const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
8552 : 0 : const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
8553 : :
8554 : : /* If mask is NULL or next_header field in mask is 0,
8555 : : * then next_header in spec should not be read
8556 : : */
8557 [ # # # # ]: 0 : if (!mask || mask->hdr.next_header == 0)
8558 : : return 0xff;
8559 : :
8560 : 0 : return spec->hdr.next_header & mask->hdr.next_header;
8561 : : }
8562 : : default:
8563 : : return 0xff;
8564 : : }
8565 : : }
8566 : :
8567 : : static int
8568 : 0 : mlx5_hw_flow_tunnel_ip_check(uint64_t last_item,
8569 : : const struct rte_flow_item *last_l3_item,
8570 : : const struct rte_flow_item *item,
8571 : : uint64_t *item_flags,
8572 : : struct rte_flow_error *error)
8573 : : {
8574 : : uint64_t tunnel_flag = 0;
8575 : : uint8_t outer_protocol;
8576 : :
8577 : : /* IP tunnel detection - only single-level tunneling supported */
8578 [ # # ]: 0 : if (last_l3_item && (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV4 ||
8579 [ # # ]: 0 : last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV6)) {
8580 : : /*
8581 : : * Tunnel type determination strategy:
8582 : : * 1. If previous L3 item has protocol field specified, use it (RFC compliant)
8583 : : * 2. Otherwise, fall back to inner header type (what's being encapsulated)
8584 : : */
8585 : 0 : outer_protocol = mlx5_hw_flow_get_next_protocol(last_l3_item);
8586 : :
8587 [ # # ]: 0 : if (outer_protocol != 0xff) {
8588 : : /* Proto field specified in outer hdr mask - use RFC-compliant detection */
8589 [ # # # ]: 0 : switch (outer_protocol) {
8590 : 0 : case IPPROTO_IPIP: /* 4 - IP-in-IP */
8591 : : /* Outer header indicates IPv4 payload */
8592 [ # # ]: 0 : if (item->type == RTE_FLOW_ITEM_TYPE_IPV6)
8593 : 0 : return rte_flow_error_set(error, EINVAL,
8594 : : RTE_FLOW_ERROR_TYPE_ITEM, item,
8595 : : "protocol mismatch: outer proto is IPIP but inner is IPv6");
8596 : : tunnel_flag = MLX5_FLOW_LAYER_IPIP;
8597 : : break;
8598 : 0 : case IPPROTO_IPV6: /* 41 - IPv6-in-IP */
8599 : : /* Outer header indicates IPv6 payload */
8600 [ # # ]: 0 : if (item->type == RTE_FLOW_ITEM_TYPE_IPV4)
8601 : 0 : return rte_flow_error_set(error, EINVAL,
8602 : : RTE_FLOW_ERROR_TYPE_ITEM, item,
8603 : : "protocol mismatch: outer proto is IPV6 but inner is IPv4");
8604 : : tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP;
8605 : : break;
8606 : 0 : default:
8607 : : /* Unknown/unsupported protocol, fall back to inner header type */
8608 : 0 : goto fallback_classification;
8609 : : }
8610 : : } else {
8611 : 0 : fallback_classification:
8612 : : /*
8613 : : * Protocol field not specified or unknown - classify based on
8614 : : * what is being encapsulated (inner header type)
8615 : : */
8616 [ # # ]: 0 : if (item->type == RTE_FLOW_ITEM_TYPE_IPV4)
8617 : : tunnel_flag = MLX5_FLOW_LAYER_IPIP;
8618 [ # # ]: 0 : else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6)
8619 : : tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP;
8620 : : else
8621 : : return 0; /* Not an IP item - shouldn't happen, but be defensive */
8622 : : }
8623 : :
8624 : : /* Check for unsupported nested tunneling after tunnel is detected */
8625 [ # # ]: 0 : if (*item_flags & MLX5_FLOW_LAYER_TUNNEL)
8626 : 0 : return rte_flow_error_set(error, ENOTSUP,
8627 : : RTE_FLOW_ERROR_TYPE_ITEM, item,
8628 : : "multiple tunnel layers not supported");
8629 : :
8630 : 0 : *item_flags |= tunnel_flag;
8631 : 0 : return 1; /* Tunnel detected */
8632 [ # # ]: 0 : } else if (last_item == MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) {
8633 : : /* Special case: IPv6 routing extension header */
8634 : : /* Check for unsupported nested tunneling */
8635 [ # # ]: 0 : if (*item_flags & MLX5_FLOW_LAYER_TUNNEL)
8636 : 0 : return rte_flow_error_set(error, ENOTSUP,
8637 : : RTE_FLOW_ERROR_TYPE_ITEM, item,
8638 : : "multiple tunnel layers not supported");
8639 : :
8640 : 0 : *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
8641 : 0 : return 1; /* Tunnel detected */
8642 : : }
8643 : :
8644 : : return 0; /* No tunnel */
8645 : : }
8646 : :
/* Full-match IPv4 header mask used by HWS NIC pattern validation. */
const struct rte_flow_item_ipv4 hws_nic_ipv4_mask = {
	.hdr = {
		.version = 0xf,
		.ihl = 0xf,
		.type_of_service = 0xff,
		.total_length = RTE_BE16(0xffff),
		.packet_id = RTE_BE16(0xffff),
		.fragment_offset = RTE_BE16(0xffff),
		.time_to_live = 0xff,
		.next_proto_id = 0xff,
		.src_addr = RTE_BE32(0xffffffff),
		.dst_addr = RTE_BE32(0xffffffff),
	},
};

/* Full-match IPv6 header mask (including fragment-extension flag). */
const struct rte_flow_item_ipv6 hws_nic_ipv6_mask = {
	.hdr = {
		.vtc_flow = RTE_BE32(0xffffffff),
		.payload_len = RTE_BE16(0xffff),
		.proto = 0xff,
		.hop_limits = 0xff,
		.src_addr = RTE_IPV6_MASK_FULL,
		.dst_addr = RTE_IPV6_MASK_FULL,
	},
	.has_frag_ext = 1,
};

/* Full-match eCPRI mask: common header word plus first payload word. */
const struct rte_flow_item_ecpri hws_nic_ecpri_mask = {
	.hdr = {
		.common = {
			.u32 = RTE_BE32(0xffffffff),
		},
		.dummy[0] = 0xffffffff,
	},
};
8682 : :
8683 : :
8684 : : static int
8685 : 0 : flow_hw_validate_item_ptype(const struct rte_flow_item *item,
8686 : : struct rte_flow_error *error)
8687 : : {
8688 : 0 : const struct rte_flow_item_ptype *ptype = item->mask;
8689 : :
8690 : : /* HWS does not allow empty PTYPE mask */
8691 [ # # ]: 0 : if (!ptype)
8692 : 0 : return rte_flow_error_set(error, EINVAL,
8693 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8694 : : NULL, "empty ptype mask");
8695 [ # # ]: 0 : if (!(ptype->packet_type &
8696 : : (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
8697 : : RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK |
8698 : : RTE_PTYPE_INNER_L4_MASK)))
8699 : 0 : return rte_flow_error_set(error, ENOTSUP,
8700 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8701 : : NULL, "ptype mask not supported");
8702 : : return 0;
8703 : : }
8704 : :
/* Carries items noticed during pattern validation for cross-item checks. */
struct mlx5_hw_pattern_validation_ctx {
	/* GENEVE item seen in the pattern, if any. */
	const struct rte_flow_item *geneve_item;
	/* Flex item seen in the pattern, if any. */
	const struct rte_flow_item *flex_item;
};
8709 : :
8710 : : static int
8711 : 0 : __flow_hw_pattern_validate(struct rte_eth_dev *dev,
8712 : : const struct rte_flow_pattern_template_attr *attr,
8713 : : const struct rte_flow_item items[],
8714 : : uint64_t *item_flags,
8715 : : bool nt_flow,
8716 : : struct rte_flow_error *error)
8717 : : {
8718 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
8719 : : const struct rte_flow_item *last_l3_item = NULL;
8720 : : const struct rte_flow_item *item;
8721 : : const struct rte_flow_item *gtp_item = NULL;
8722 : : const struct rte_flow_item *gre_item = NULL;
8723 : 0 : const struct rte_flow_attr flow_attr = {
8724 : 0 : .ingress = attr->ingress,
8725 : 0 : .egress = attr->egress,
8726 : 0 : .transfer = attr->transfer
8727 : : };
8728 : : int ret, tag_idx;
8729 : : uint32_t tag_bitmap = 0;
8730 : : uint64_t last_item = 0;
8731 : :
8732 [ # # ]: 0 : if (!mlx5_hw_ctx_validate(dev, error))
8733 : 0 : return -rte_errno;
8734 [ # # ]: 0 : if (!attr->ingress && !attr->egress && !attr->transfer)
8735 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8736 : : "at least one of the direction attributes"
8737 : : " must be specified");
8738 [ # # ]: 0 : if (priv->sh->config.dv_esw_en) {
8739 : : MLX5_ASSERT(priv->master || priv->representor);
8740 [ # # ]: 0 : if (priv->master) {
8741 [ # # ]: 0 : if ((attr->ingress && attr->egress) ||
8742 [ # # ]: 0 : (attr->ingress && attr->transfer) ||
8743 [ # # ]: 0 : (attr->egress && attr->transfer))
8744 : 0 : return rte_flow_error_set(error, EINVAL,
8745 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8746 : : "only one direction attribute at once"
8747 : : " can be used on transfer proxy port");
8748 : : } else {
8749 [ # # ]: 0 : if (attr->transfer)
8750 : 0 : return rte_flow_error_set(error, EINVAL,
8751 : : RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
8752 : : "transfer attribute cannot be used with"
8753 : : " port representors");
8754 [ # # ]: 0 : if (attr->ingress && attr->egress)
8755 : 0 : return rte_flow_error_set(error, EINVAL,
8756 : : RTE_FLOW_ERROR_TYPE_ATTR, NULL,
8757 : : "ingress and egress direction attributes"
8758 : : " cannot be used at the same time on"
8759 : : " port representors");
8760 : : }
8761 : : } else {
8762 [ # # ]: 0 : if (attr->transfer)
8763 : 0 : return rte_flow_error_set(error, EINVAL,
8764 : : RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
8765 : : "transfer attribute cannot be used when"
8766 : : " E-Switch is disabled");
8767 : : }
8768 [ # # ]: 0 : for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
8769 : 0 : bool tunnel = *item_flags & MLX5_FLOW_LAYER_TUNNEL;
8770 : :
8771 [ # # # # : 0 : switch ((int)item->type) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # # #
# # # #
# ]
8772 : 0 : case RTE_FLOW_ITEM_TYPE_PTYPE:
8773 : 0 : ret = flow_hw_validate_item_ptype(item, error);
8774 [ # # ]: 0 : if (ret)
8775 : 0 : return ret;
8776 : : last_item = MLX5_FLOW_ITEM_PTYPE;
8777 : : break;
8778 : 0 : case RTE_FLOW_ITEM_TYPE_TAG:
8779 : : {
8780 : 0 : const struct rte_flow_item_tag *tag =
8781 : : (const struct rte_flow_item_tag *)item->spec;
8782 : :
8783 [ # # ]: 0 : if (tag == NULL)
8784 : 0 : return rte_flow_error_set(error, EINVAL,
8785 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8786 : : NULL,
8787 : : "Tag spec is NULL");
8788 [ # # ]: 0 : if (tag->index >= MLX5_FLOW_HW_TAGS_MAX &&
8789 : : tag->index != RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX)
8790 : 0 : return rte_flow_error_set(error, EINVAL,
8791 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8792 : : NULL,
8793 : : "Invalid tag index");
8794 [ # # ]: 0 : tag_idx = flow_hw_get_reg_id(dev, RTE_FLOW_ITEM_TYPE_TAG, tag->index);
8795 [ # # ]: 0 : if (tag_idx == REG_NON)
8796 : 0 : return rte_flow_error_set(error, EINVAL,
8797 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8798 : : NULL,
8799 : : "Unsupported tag index");
8800 [ # # ]: 0 : if (tag_bitmap & (1 << tag_idx))
8801 : 0 : return rte_flow_error_set(error, EINVAL,
8802 : : RTE_FLOW_ERROR_TYPE_ITEM,
8803 : : NULL,
8804 : : "Duplicated tag index");
8805 : 0 : tag_bitmap |= 1 << tag_idx;
8806 : : last_item = MLX5_FLOW_ITEM_TAG;
8807 : 0 : break;
8808 : : }
8809 : 0 : case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8810 : : {
8811 : 0 : const struct rte_flow_item_tag *tag =
8812 : : (const struct rte_flow_item_tag *)item->spec;
8813 : 0 : uint16_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
8814 : :
8815 [ # # ]: 0 : if (!((1 << (tag->index - REG_C_0)) & regcs))
8816 : 0 : return rte_flow_error_set(error, EINVAL,
8817 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8818 : : NULL,
8819 : : "Unsupported internal tag index");
8820 [ # # ]: 0 : if (tag_bitmap & (1 << tag->index))
8821 : 0 : return rte_flow_error_set(error, EINVAL,
8822 : : RTE_FLOW_ERROR_TYPE_ITEM,
8823 : : NULL,
8824 : : "Duplicated tag index");
8825 : 0 : tag_bitmap |= 1 << tag->index;
8826 : 0 : break;
8827 : : }
8828 : 0 : case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
8829 [ # # ]: 0 : if (attr->ingress)
8830 : 0 : return rte_flow_error_set(error, EINVAL,
8831 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8832 : : "represented port item cannot be used"
8833 : : " when ingress attribute is set");
8834 [ # # ]: 0 : if (attr->egress)
8835 : 0 : return rte_flow_error_set(error, EINVAL,
8836 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8837 : : "represented port item cannot be used"
8838 : : " when egress attribute is set");
8839 : : last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
8840 : : break;
8841 : 0 : case RTE_FLOW_ITEM_TYPE_META:
8842 : : /* ingress + group 0 is not supported */
8843 : 0 : *item_flags |= MLX5_FLOW_ITEM_METADATA;
8844 : 0 : break;
8845 : : case RTE_FLOW_ITEM_TYPE_METER_COLOR:
8846 : : {
8847 : : int reg = flow_hw_get_reg_id(dev,
8848 : : RTE_FLOW_ITEM_TYPE_METER_COLOR,
8849 : : 0);
8850 [ # # ]: 0 : if (reg == REG_NON)
8851 : 0 : return rte_flow_error_set(error, EINVAL,
8852 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8853 : : NULL,
8854 : : "Unsupported meter color register");
8855 [ # # ]: 0 : if (*item_flags &
8856 : : (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
8857 : 0 : return rte_flow_error_set
8858 : : (error, EINVAL,
8859 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
8860 : : last_item = MLX5_FLOW_ITEM_METER_COLOR;
8861 : : break;
8862 : : }
8863 : 0 : case RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY:
8864 : : {
8865 [ # # ]: 0 : if (!priv->sh->lag_rx_port_affinity_en)
8866 : 0 : return rte_flow_error_set(error, EINVAL,
8867 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8868 : : "Unsupported aggregated affinity with Older FW");
8869 [ # # # # : 0 : if ((attr->transfer && priv->fdb_def_rule) || attr->egress)
# # ]
8870 : 0 : return rte_flow_error_set(error, EINVAL,
8871 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL,
8872 : : "Aggregated affinity item not supported"
8873 : : " with egress or transfer"
8874 : : " attribute");
8875 : : last_item = MLX5_FLOW_ITEM_AGGR_AFFINITY;
8876 : : break;
8877 : : }
8878 : 0 : case RTE_FLOW_ITEM_TYPE_GENEVE:
8879 : : last_item = MLX5_FLOW_LAYER_GENEVE;
8880 : 0 : break;
8881 : 0 : case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
8882 : : {
8883 : : last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
8884 : : /*
8885 : : * For non template the parser is internally created before
8886 : : * the flow creation.
8887 : : */
8888 [ # # ]: 0 : if (!nt_flow) {
8889 : 0 : ret = mlx5_flow_geneve_tlv_option_validate(priv, item,
8890 : : error);
8891 [ # # ]: 0 : if (ret < 0)
8892 : 0 : return ret;
8893 : : }
8894 : : break;
8895 : : }
8896 : 0 : case RTE_FLOW_ITEM_TYPE_COMPARE:
8897 : : {
8898 : : last_item = MLX5_FLOW_ITEM_COMPARE;
8899 : 0 : ret = flow_hw_validate_item_compare(item, error);
8900 [ # # ]: 0 : if (ret)
8901 : 0 : return ret;
8902 : : break;
8903 : : }
8904 : 0 : case RTE_FLOW_ITEM_TYPE_ETH:
8905 : 0 : ret = mlx5_flow_validate_item_eth(dev, item,
8906 : : *item_flags,
8907 : : true, error);
8908 [ # # ]: 0 : if (ret < 0)
8909 : 0 : return ret;
8910 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8911 : : MLX5_FLOW_LAYER_OUTER_L2;
8912 : : break;
8913 : 0 : case RTE_FLOW_ITEM_TYPE_VLAN:
8914 : 0 : ret = mlx5_flow_dv_validate_item_vlan(item, *item_flags,
8915 : : dev, error);
8916 [ # # ]: 0 : if (ret < 0)
8917 : 0 : return ret;
8918 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
8919 : : MLX5_FLOW_LAYER_OUTER_VLAN;
8920 : : break;
8921 : 0 : case RTE_FLOW_ITEM_TYPE_IPV4:
8922 : 0 : ret = mlx5_hw_flow_tunnel_ip_check(last_item, last_l3_item, item,
8923 : : item_flags, error);
8924 [ # # ]: 0 : if (ret < 0)
8925 : 0 : return ret;
8926 : 0 : tunnel |= (ret > 0);
8927 : 0 : ret = mlx5_flow_dv_validate_item_ipv4(dev, item,
8928 : : *item_flags,
8929 : : last_item, 0,
8930 : : &hws_nic_ipv4_mask,
8931 : : error);
8932 [ # # ]: 0 : if (ret)
8933 : 0 : return ret;
8934 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8935 : : MLX5_FLOW_LAYER_OUTER_L3_IPV4;
8936 : : last_l3_item = item;
8937 : : break;
8938 : 0 : case RTE_FLOW_ITEM_TYPE_IPV6:
8939 : 0 : ret = mlx5_hw_flow_tunnel_ip_check(last_item, last_l3_item, item,
8940 : : item_flags, error);
8941 [ # # ]: 0 : if (ret < 0)
8942 : 0 : return ret;
8943 : 0 : tunnel |= (ret > 0);
8944 : 0 : ret = mlx5_flow_validate_item_ipv6(dev, item,
8945 : : *item_flags,
8946 : : last_item, 0,
8947 : : &hws_nic_ipv6_mask,
8948 : : error);
8949 [ # # ]: 0 : if (ret < 0)
8950 : 0 : return ret;
8951 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8952 : : MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8953 : : last_l3_item = item;
8954 : : break;
8955 : 0 : case RTE_FLOW_ITEM_TYPE_UDP:
8956 : 0 : ret = mlx5_flow_validate_item_udp(dev, item,
8957 : : *item_flags,
8958 : : 0xff, error);
8959 [ # # ]: 0 : if (ret)
8960 : 0 : return ret;
8961 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8962 : : MLX5_FLOW_LAYER_OUTER_L4_UDP;
8963 : : break;
8964 : 0 : case RTE_FLOW_ITEM_TYPE_TCP:
8965 : 0 : ret = mlx5_flow_validate_item_tcp
8966 : : (dev, item, *item_flags,
8967 : : 0xff, &nic_tcp_mask, error);
8968 [ # # ]: 0 : if (ret < 0)
8969 : 0 : return ret;
8970 [ # # ]: 0 : last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8971 : : MLX5_FLOW_LAYER_OUTER_L4_TCP;
8972 : : break;
8973 : 0 : case RTE_FLOW_ITEM_TYPE_GTP:
8974 : : gtp_item = item;
8975 : 0 : ret = mlx5_flow_dv_validate_item_gtp(dev, gtp_item,
8976 : : *item_flags, error);
8977 [ # # ]: 0 : if (ret < 0)
8978 : 0 : return ret;
8979 : : last_item = MLX5_FLOW_LAYER_GTP;
8980 : : break;
8981 : 0 : case RTE_FLOW_ITEM_TYPE_GTP_PSC:
8982 : 0 : ret = mlx5_flow_dv_validate_item_gtp_psc(dev, item,
8983 : : last_item,
8984 : : gtp_item,
8985 : : false, error);
8986 [ # # ]: 0 : if (ret < 0)
8987 : 0 : return ret;
8988 : : last_item = MLX5_FLOW_LAYER_GTP_PSC;
8989 : : break;
8990 : 0 : case RTE_FLOW_ITEM_TYPE_VXLAN:
8991 : 0 : ret = mlx5_flow_validate_item_vxlan(dev, 0, item,
8992 : : *item_flags,
8993 : : false, error);
8994 [ # # ]: 0 : if (ret < 0)
8995 : 0 : return ret;
8996 : : last_item = MLX5_FLOW_LAYER_VXLAN;
8997 : : break;
8998 : 0 : case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8999 : 0 : ret = mlx5_flow_validate_item_vxlan_gpe(item,
9000 : : *item_flags,
9001 : : dev, error);
9002 [ # # ]: 0 : if (ret < 0)
9003 : 0 : return ret;
9004 : : last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
9005 : : break;
9006 : 0 : case RTE_FLOW_ITEM_TYPE_MPLS:
9007 : 0 : ret = mlx5_flow_validate_item_mpls(dev, item,
9008 : : *item_flags,
9009 : : last_item, error);
9010 [ # # ]: 0 : if (ret < 0)
9011 : 0 : return ret;
9012 : : last_item = MLX5_FLOW_LAYER_MPLS;
9013 : : break;
9014 : 0 : case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
9015 : : case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
9016 : : last_item = MLX5_FLOW_ITEM_SQ;
9017 : 0 : break;
9018 : 0 : case RTE_FLOW_ITEM_TYPE_GRE:
9019 : 0 : ret = mlx5_flow_validate_item_gre(dev, item,
9020 : : *item_flags,
9021 : : 0xff, error);
9022 [ # # ]: 0 : if (ret < 0)
9023 : 0 : return ret;
9024 : : gre_item = item;
9025 : : last_item = MLX5_FLOW_LAYER_GRE;
9026 : : break;
9027 : 0 : case RTE_FLOW_ITEM_TYPE_GRE_KEY:
9028 [ # # ]: 0 : if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
9029 : 0 : return rte_flow_error_set
9030 : : (error, EINVAL,
9031 : : RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
9032 : 0 : ret = mlx5_flow_validate_item_gre_key
9033 : : (dev, item, *item_flags, gre_item, error);
9034 [ # # ]: 0 : if (ret < 0)
9035 : 0 : return ret;
9036 : : last_item = MLX5_FLOW_LAYER_GRE_KEY;
9037 : : break;
9038 : 0 : case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
9039 [ # # ]: 0 : if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
9040 : 0 : return rte_flow_error_set
9041 : : (error, EINVAL,
9042 : : RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
9043 : 0 : ret = mlx5_flow_validate_item_gre_option(dev, item,
9044 : : *item_flags,
9045 : : &flow_attr,
9046 : : gre_item,
9047 : : error);
9048 [ # # ]: 0 : if (ret < 0)
9049 : 0 : return ret;
9050 : : last_item = MLX5_FLOW_LAYER_GRE;
9051 : : break;
9052 : 0 : case RTE_FLOW_ITEM_TYPE_NVGRE:
9053 : 0 : ret = mlx5_flow_validate_item_nvgre(dev, item,
9054 : : *item_flags, 0xff,
9055 : : error);
9056 [ # # ]: 0 : if (ret)
9057 : 0 : return ret;
9058 : : last_item = MLX5_FLOW_LAYER_NVGRE;
9059 : : break;
9060 : 0 : case RTE_FLOW_ITEM_TYPE_ICMP:
9061 : 0 : ret = mlx5_flow_validate_item_icmp(dev, item,
9062 : : *item_flags, 0xff,
9063 : : error);
9064 [ # # ]: 0 : if (ret < 0)
9065 : 0 : return ret;
9066 : : last_item = MLX5_FLOW_LAYER_ICMP;
9067 : : break;
9068 : 0 : case RTE_FLOW_ITEM_TYPE_ICMP6:
9069 : 0 : ret = mlx5_flow_validate_item_icmp6(dev, item,
9070 : : *item_flags, 0xff,
9071 : : error);
9072 [ # # ]: 0 : if (ret < 0)
9073 : 0 : return ret;
9074 : : last_item = MLX5_FLOW_LAYER_ICMP6;
9075 : : break;
9076 : 0 : case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
9077 : : case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
9078 : 0 : ret = mlx5_flow_validate_item_icmp6_echo(dev, item,
9079 : : *item_flags,
9080 : : 0xff, error);
9081 [ # # ]: 0 : if (ret < 0)
9082 : 0 : return ret;
9083 : : last_item = MLX5_FLOW_LAYER_ICMP6;
9084 : : break;
9085 : 0 : case RTE_FLOW_ITEM_TYPE_CONNTRACK:
9086 [ # # ]: 0 : if (*item_flags &
9087 : : (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
9088 : 0 : return rte_flow_error_set
9089 : : (error, EINVAL,
9090 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
9091 : 0 : ret = mlx5_flow_dv_validate_item_aso_ct(dev, item,
9092 : : item_flags,
9093 : : error);
9094 [ # # ]: 0 : if (ret < 0)
9095 : 0 : return ret;
9096 : : break;
9097 : 0 : case RTE_FLOW_ITEM_TYPE_QUOTA:
9098 [ # # ]: 0 : if (*item_flags &
9099 : : (MLX5_FLOW_ITEM_METER_COLOR |
9100 : : MLX5_FLOW_LAYER_ASO_CT))
9101 : 0 : return rte_flow_error_set
9102 : : (error, EINVAL,
9103 : : RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
9104 : : last_item = MLX5_FLOW_ITEM_QUOTA;
9105 : : break;
9106 : 0 : case RTE_FLOW_ITEM_TYPE_ESP:
9107 : 0 : ret = mlx5_flow_os_validate_item_esp(dev, item, *item_flags,
9108 : : 0xff, true, error);
9109 [ # # ]: 0 : if (ret < 0)
9110 : 0 : return ret;
9111 : : last_item = MLX5_FLOW_ITEM_ESP;
9112 : : break;
9113 : 0 : case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
9114 : : last_item = tunnel ?
9115 [ # # ]: 0 : MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
9116 : : MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
9117 : : break;
9118 : 0 : case RTE_FLOW_ITEM_TYPE_FLEX: {
9119 : 0 : enum rte_flow_item_flex_tunnel_mode tunnel_mode = FLEX_TUNNEL_MODE_SINGLE;
9120 : :
9121 : 0 : ret = mlx5_flex_get_tunnel_mode(item, &tunnel_mode);
9122 [ # # ]: 0 : if (ret < 0)
9123 : 0 : return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
9124 : : item, "Unable to get flex item mode");
9125 [ # # ]: 0 : if (tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL)
9126 : : last_item = MLX5_FLOW_ITEM_FLEX_TUNNEL;
9127 : : else
9128 : : last_item = tunnel ?
9129 [ # # ]: 0 : MLX5_FLOW_ITEM_INNER_FLEX :
9130 : : MLX5_FLOW_ITEM_OUTER_FLEX;
9131 : 0 : break;
9132 : : }
9133 : 0 : case RTE_FLOW_ITEM_TYPE_RANDOM:
9134 : : last_item = MLX5_FLOW_ITEM_RANDOM;
9135 : 0 : break;
9136 : 0 : case RTE_FLOW_ITEM_TYPE_NSH:
9137 : : last_item = MLX5_FLOW_ITEM_NSH;
9138 : : ret = mlx5_hw_validate_item_nsh(dev, item, error);
9139 [ # # ]: 0 : if (ret < 0)
9140 : 0 : return ret;
9141 : : break;
9142 : : case RTE_FLOW_ITEM_TYPE_INTEGRITY:
9143 : : /*
9144 : : * Integrity flow item validation require access to
9145 : : * both item mask and spec.
9146 : : * Current HWS model allows item mask in pattern
9147 : : * template and item spec in flow rule.
9148 : : */
9149 : : break;
9150 : 0 : case RTE_FLOW_ITEM_TYPE_ECPRI:
9151 : 0 : ret = mlx5_flow_validate_item_ecpri(dev, item, *item_flags, last_item,
9152 : : RTE_ETHER_TYPE_ECPRI,
9153 : : &hws_nic_ecpri_mask, error);
9154 [ # # ]: 0 : if (ret < 0)
9155 : 0 : return ret;
9156 : 0 : *item_flags |= MLX5_FLOW_LAYER_ECPRI;
9157 : 0 : break;
9158 : : case RTE_FLOW_ITEM_TYPE_IB_BTH:
9159 : : case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
9160 : : case RTE_FLOW_ITEM_TYPE_VOID:
9161 : : case RTE_FLOW_ITEM_TYPE_END:
9162 : : break;
9163 : 0 : default:
9164 : 0 : return rte_flow_error_set(error, EINVAL,
9165 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9166 : : NULL,
9167 : : "Unsupported item type");
9168 : : }
9169 : 0 : *item_flags |= last_item;
9170 : : }
9171 : 0 : return 1 + RTE_PTR_DIFF(item, items) / sizeof(item[0]);
9172 : : }
9173 : :
9174 : : static int
9175 : 0 : flow_hw_pattern_validate(struct rte_eth_dev *dev,
9176 : : const struct rte_flow_pattern_template_attr *attr,
9177 : : const struct rte_flow_item items[],
9178 : : uint64_t *item_flags,
9179 : : struct rte_flow_error *error)
9180 : : {
9181 : 0 : return __flow_hw_pattern_validate(dev, attr, items, item_flags, false, error);
9182 : : }
9183 : :
9184 : : /*
9185 : : * Verify that the tested flow patterns fits STE size limit in HWS group.
9186 : : *
9187 : : *
9188 : : * Return values:
9189 : : * 0 : Tested patterns fit STE size limit
9190 : : * -EINVAL : Invalid parameters detected
9191 : : * -E2BIG : Tested patterns exceed STE size limit
9192 : : */
9193 : : static int
9194 : 0 : pattern_template_validate(struct rte_eth_dev *dev,
9195 : : struct rte_flow_pattern_template *pt[],
9196 : : uint32_t pt_num,
9197 : : struct rte_flow_error *error)
9198 : : {
9199 : 0 : struct mlx5_flow_template_table_cfg tbl_cfg = {
9200 : : .attr = {
9201 : : .nb_flows = 64,
9202 : : .insertion_type = RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN,
9203 : : .hash_func = RTE_FLOW_TABLE_HASH_FUNC_DEFAULT,
9204 : : .flow_attr = {
9205 : : .group = 1,
9206 : 0 : .ingress = pt[0]->attr.ingress,
9207 : 0 : .egress = pt[0]->attr.egress,
9208 : 0 : .transfer = pt[0]->attr.transfer
9209 : : }
9210 : : }
9211 : : };
9212 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9213 : : struct rte_flow_actions_template *action_template;
9214 : : struct rte_flow_template_table *tmpl_tbl;
9215 : : int ret;
9216 : :
9217 [ # # ]: 0 : if (pt[0]->attr.ingress) {
9218 : 0 : action_template =
9219 : 0 : priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
9220 [ # # ]: 0 : } else if (pt[0]->attr.egress) {
9221 : 0 : action_template =
9222 : 0 : priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
9223 [ # # ]: 0 : } else if (pt[0]->attr.transfer) {
9224 : 0 : action_template =
9225 : 0 : priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
9226 : : } else {
9227 : : ret = EINVAL;
9228 : 0 : goto end;
9229 : : }
9230 : :
9231 [ # # ]: 0 : if (pt[0]->item_flags & MLX5_FLOW_ITEM_COMPARE)
9232 : 0 : tbl_cfg.attr.nb_flows = 1;
9233 : 0 : tmpl_tbl = flow_hw_table_create(dev, &tbl_cfg, pt, pt_num,
9234 : : &action_template, 1, error);
9235 [ # # ]: 0 : if (tmpl_tbl) {
9236 : : ret = 0;
9237 : 0 : flow_hw_table_destroy(dev, tmpl_tbl, error);
9238 : : } else {
9239 [ # # # ]: 0 : switch (rte_errno) {
9240 : : case E2BIG:
9241 : : ret = E2BIG;
9242 : : break;
9243 : : case ENOTSUP:
9244 : : ret = EINVAL;
9245 : : break;
9246 : : default:
9247 : : ret = 0;
9248 : : break;
9249 : : }
9250 : : }
9251 : : end:
9252 : : if (ret)
9253 : 0 : rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9254 : : NULL, "failed to validate pattern template");
9255 : 0 : return -ret;
9256 : : }
9257 : :
9258 : : /**
9259 : : * Create flow item template.
9260 : : *
9261 : : * @param[in] dev
9262 : : * Pointer to the rte_eth_dev structure.
9263 : : * @param[in] attr
9264 : : * Pointer to the item template attributes.
9265 : : * @param[in] items
9266 : : * The template item pattern.
9267 : : * @param[out] error
9268 : : * Pointer to error structure.
9269 : : *
9270 : : * @return
9271 : : * Item template pointer on success, NULL otherwise and rte_errno is set.
9272 : : */
9273 : : static struct rte_flow_pattern_template *
9274 : 0 : flow_hw_pattern_template_create(struct rte_eth_dev *dev,
9275 : : const struct rte_flow_pattern_template_attr *attr,
9276 : : const struct rte_flow_item items[],
9277 : : bool external,
9278 : : struct rte_flow_error *error)
9279 : : {
9280 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9281 : : struct rte_flow_pattern_template *it;
9282 : : struct rte_flow_item *copied_items = NULL;
9283 : : const struct rte_flow_item *tmpl_items;
9284 : 0 : uint64_t orig_item_nb, item_flags = 0;
9285 : 0 : struct rte_flow_item port = {
9286 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
9287 : : .mask = &rte_flow_item_ethdev_mask,
9288 : : };
9289 : 0 : struct rte_flow_item_tag tag_v = {
9290 : : .data = 0,
9291 : : .index = REG_C_0,
9292 : : };
9293 : 0 : struct rte_flow_item_tag tag_m = {
9294 : : .data = flow_hw_tx_tag_regc_mask(dev),
9295 : : .index = 0xff,
9296 : : };
9297 : 0 : struct rte_flow_item tag = {
9298 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
9299 : : .spec = &tag_v,
9300 : : .mask = &tag_m,
9301 : : .last = NULL
9302 : : };
9303 : : int it_items_size;
9304 : : unsigned int i = 0;
9305 : : int rc;
9306 : :
9307 : : /* Validate application items only */
9308 : : rc = flow_hw_pattern_validate(dev, attr, items, &item_flags, error);
9309 [ # # ]: 0 : if (rc < 0)
9310 : : return NULL;
9311 : 0 : orig_item_nb = rc;
9312 [ # # ]: 0 : if (priv->sh->config.dv_esw_en &&
9313 [ # # ]: 0 : attr->ingress && !attr->egress && !attr->transfer) {
9314 : 0 : copied_items = flow_hw_prepend_item(items, orig_item_nb, &port, error);
9315 [ # # ]: 0 : if (!copied_items)
9316 : : return NULL;
9317 : : tmpl_items = copied_items;
9318 [ # # ]: 0 : } else if (priv->sh->config.dv_esw_en &&
9319 [ # # ]: 0 : !attr->ingress && attr->egress && !attr->transfer) {
9320 [ # # ]: 0 : if (item_flags & MLX5_FLOW_ITEM_SQ) {
9321 : 0 : DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
9322 : : "pattern template", dev->data->port_id);
9323 : : tmpl_items = items;
9324 : 0 : goto setup_pattern_template;
9325 : : }
9326 : 0 : copied_items = flow_hw_prepend_item(items, orig_item_nb, &tag, error);
9327 [ # # ]: 0 : if (!copied_items)
9328 : : return NULL;
9329 : : tmpl_items = copied_items;
9330 : : } else {
9331 : : tmpl_items = items;
9332 : : }
9333 : 0 : setup_pattern_template:
9334 : 0 : it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, SOCKET_ID_ANY);
9335 [ # # ]: 0 : if (!it) {
9336 : 0 : rte_flow_error_set(error, ENOMEM,
9337 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9338 : : NULL,
9339 : : "cannot allocate item template");
9340 : 0 : goto error;
9341 : : }
9342 : 0 : it->attr = *attr;
9343 : 0 : it->item_flags = item_flags;
9344 : 0 : it->orig_item_nb = orig_item_nb;
9345 : 0 : it_items_size = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, tmpl_items, error);
9346 [ # # ]: 0 : if (it_items_size <= 0) {
9347 : 0 : rte_flow_error_set(error, ENOMEM,
9348 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9349 : : NULL,
9350 : : "Failed to determine buffer size for pattern");
9351 : 0 : goto error;
9352 : : }
9353 : 0 : it_items_size = RTE_ALIGN(it_items_size, 16);
9354 : 0 : it->items = mlx5_malloc(MLX5_MEM_ZERO, it_items_size, 0, SOCKET_ID_ANY);
9355 [ # # ]: 0 : if (it->items == NULL) {
9356 : 0 : rte_flow_error_set(error, ENOMEM,
9357 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9358 : : NULL,
9359 : : "Cannot allocate memory for pattern");
9360 : 0 : goto error;
9361 : : }
9362 : 0 : rc = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, it->items, it_items_size, tmpl_items, error);
9363 [ # # ]: 0 : if (rc <= 0) {
9364 : 0 : rte_flow_error_set(error, ENOMEM,
9365 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9366 : : NULL,
9367 : : "Failed to store pattern");
9368 : 0 : goto error;
9369 : : }
9370 : 0 : it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
9371 [ # # ]: 0 : if (!it->mt) {
9372 : 0 : rte_flow_error_set(error, rte_errno,
9373 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9374 : : NULL,
9375 : : "cannot create match template");
9376 : 0 : goto error;
9377 : : }
9378 [ # # ]: 0 : if (copied_items) {
9379 [ # # ]: 0 : if (attr->ingress)
9380 : 0 : it->implicit_port = true;
9381 [ # # ]: 0 : else if (attr->egress)
9382 : 0 : it->implicit_tag = true;
9383 : 0 : mlx5_free(copied_items);
9384 : : copied_items = NULL;
9385 : : }
9386 : : /* Either inner or outer, can't both. */
9387 [ # # ]: 0 : if (it->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
9388 : : MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) {
9389 [ # # ]: 0 : if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
9390 [ # # ]: 0 : (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) ||
9391 : 0 : (mlx5_alloc_srh_flex_parser(dev))) {
9392 : 0 : rte_flow_error_set(error, rte_errno,
9393 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9394 : : "cannot create IPv6 routing extension support");
9395 : 0 : goto error;
9396 : : }
9397 : : }
9398 [ # # ]: 0 : if (it->item_flags & MLX5_FLOW_ITEM_FLEX) {
9399 [ # # ]: 0 : for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
9400 : 0 : const struct rte_flow_item_flex *spec = items[i].spec;
9401 : : struct rte_flow_item_flex_handle *handle;
9402 : :
9403 [ # # ]: 0 : if (items[i].type != RTE_FLOW_ITEM_TYPE_FLEX)
9404 : 0 : continue;
9405 : 0 : handle = spec->handle;
9406 [ # # ]: 0 : if (flow_hw_flex_item_acquire(dev, handle,
9407 : 0 : &it->flex_item)) {
9408 : 0 : rte_flow_error_set(error, EINVAL,
9409 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9410 : : NULL, "cannot create hw FLEX item");
9411 : 0 : goto error;
9412 : : }
9413 : : }
9414 : : }
9415 [ # # ]: 0 : if (it->item_flags & MLX5_FLOW_LAYER_GENEVE_OPT) {
9416 [ # # ]: 0 : for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
9417 : 0 : const struct rte_flow_item_geneve_opt *spec =
9418 : : items[i].spec;
9419 : :
9420 [ # # ]: 0 : if (items[i].type != RTE_FLOW_ITEM_TYPE_GENEVE_OPT)
9421 : 0 : continue;
9422 [ # # ]: 0 : if (mlx5_geneve_tlv_option_register(priv, spec,
9423 : 0 : &it->geneve_opt_mng)) {
9424 : 0 : rte_flow_error_set(error, EINVAL,
9425 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9426 : : NULL, "cannot register GENEVE TLV option");
9427 : 0 : goto error;
9428 : : }
9429 : : }
9430 : : }
9431 : 0 : rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
9432 [ # # ]: 0 : if (external) {
9433 : 0 : rc = pattern_template_validate(dev, &it, 1, error);
9434 [ # # ]: 0 : if (rc)
9435 : 0 : goto error;
9436 : : }
9437 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
9438 : 0 : return it;
9439 : 0 : error:
9440 [ # # ]: 0 : if (it) {
9441 [ # # ]: 0 : if (it->flex_item)
9442 : 0 : flow_hw_flex_item_release(dev, &it->flex_item);
9443 [ # # ]: 0 : if (it->geneve_opt_mng.nb_options)
9444 : 0 : mlx5_geneve_tlv_options_unregister(priv, &it->geneve_opt_mng);
9445 [ # # ]: 0 : if (it->mt)
9446 : 0 : claim_zero(mlx5dr_match_template_destroy(it->mt));
9447 : 0 : mlx5_free(it->items);
9448 : 0 : mlx5_free(it);
9449 : : }
9450 [ # # ]: 0 : if (copied_items)
9451 : 0 : mlx5_free(copied_items);
9452 : : return NULL;
9453 : : }
9454 : :
9455 : : static struct rte_flow_pattern_template *
9456 : 0 : flow_hw_external_pattern_template_create
9457 : : (struct rte_eth_dev *dev,
9458 : : const struct rte_flow_pattern_template_attr *attr,
9459 : : const struct rte_flow_item items[],
9460 : : struct rte_flow_error *error)
9461 : : {
9462 : 0 : return flow_hw_pattern_template_create(dev, attr, items, true, error);
9463 : : }
9464 : :
9465 : : /**
9466 : : * Destroy flow item template.
9467 : : *
9468 : : * @param[in] dev
9469 : : * Pointer to the rte_eth_dev structure.
9470 : : * @param[in] template
9471 : : * Pointer to the item template to be destroyed.
9472 : : * @param[out] error
9473 : : * Pointer to error structure.
9474 : : *
9475 : : * @return
9476 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
9477 : : */
9478 : : static int
9479 : 0 : flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
9480 : : struct rte_flow_pattern_template *template,
9481 : : struct rte_flow_error *error __rte_unused)
9482 : : {
9483 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9484 : :
9485 [ # # ]: 0 : if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
9486 : 0 : DRV_LOG(WARNING, "Item template %p is still in use.",
9487 : : (void *)template);
9488 : 0 : return rte_flow_error_set(error, EBUSY,
9489 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9490 : : NULL,
9491 : : "item template is in use");
9492 : : }
9493 [ # # ]: 0 : if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
9494 : : MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
9495 : 0 : mlx5_free_srh_flex_parser(dev);
9496 [ # # ]: 0 : LIST_REMOVE(template, next);
9497 : 0 : flow_hw_flex_item_release(dev, &template->flex_item);
9498 : 0 : mlx5_geneve_tlv_options_unregister(priv, &template->geneve_opt_mng);
9499 : 0 : claim_zero(mlx5dr_match_template_destroy(template->mt));
9500 : 0 : mlx5_free(template->items);
9501 : 0 : mlx5_free(template);
9502 : 0 : return 0;
9503 : : }
9504 : :
9505 : : /*
9506 : : * Get information about HWS pre-configurable resources.
9507 : : *
9508 : : * @param[in] dev
9509 : : * Pointer to the rte_eth_dev structure.
9510 : : * @param[out] port_info
9511 : : * Pointer to port information.
9512 : : * @param[out] queue_info
9513 : : * Pointer to queue information.
9514 : : * @param[out] error
9515 : : * Pointer to error structure.
9516 : : *
9517 : : * @return
9518 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
9519 : : */
9520 : : static int
9521 : 0 : flow_hw_info_get(struct rte_eth_dev *dev,
9522 : : struct rte_flow_port_info *port_info,
9523 : : struct rte_flow_queue_info *queue_info,
9524 : : struct rte_flow_error *error __rte_unused)
9525 : : {
9526 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9527 : 0 : uint16_t port_id = dev->data->port_id;
9528 : : struct rte_mtr_capabilities mtr_cap;
9529 : : int ret;
9530 : :
9531 : : memset(port_info, 0, sizeof(*port_info));
9532 : : /* Queue size is unlimited from low-level. */
9533 : 0 : port_info->max_nb_queues = UINT32_MAX;
9534 : 0 : queue_info->max_size = UINT32_MAX;
9535 : :
9536 : : memset(&mtr_cap, 0, sizeof(struct rte_mtr_capabilities));
9537 : 0 : ret = rte_mtr_capabilities_get(port_id, &mtr_cap, NULL);
9538 [ # # ]: 0 : if (!ret)
9539 : 0 : port_info->max_nb_meters = mtr_cap.n_max;
9540 : 0 : port_info->max_nb_counters = priv->sh->hws_max_nb_counters;
9541 : 0 : port_info->max_nb_aging_objects = port_info->max_nb_counters;
9542 : 0 : return 0;
9543 : : }
9544 : :
9545 : : /**
9546 : : * Create group callback.
9547 : : *
9548 : : * @param[in] tool_ctx
9549 : : * Pointer to the hash list related context.
9550 : : * @param[in] cb_ctx
9551 : : * Pointer to the group creation context.
9552 : : *
9553 : : * @return
9554 : : * Group entry on success, NULL otherwise and rte_errno is set.
9555 : : */
9556 : : struct mlx5_list_entry *
9557 : 0 : mlx5_flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
9558 : : {
9559 : : struct mlx5_dev_ctx_shared *sh = tool_ctx;
9560 : : struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9561 : 0 : struct rte_eth_dev *dev = ctx->dev;
9562 : 0 : struct rte_flow_attr *attr = (struct rte_flow_attr *)ctx->data;
9563 : 0 : uint32_t *specialize = (uint32_t *)ctx->data2;
9564 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
9565 : : bool unified_fdb = is_unified_fdb(priv);
9566 : 0 : struct mlx5dr_table_attr dr_tbl_attr = {0};
9567 : 0 : struct rte_flow_error *error = ctx->error;
9568 : : struct mlx5_flow_group *grp_data;
9569 : : struct mlx5dr_table *tbl = NULL;
9570 : : struct mlx5dr_action *jump;
9571 : : uint32_t hws_flags;
9572 : 0 : uint32_t idx = 0;
9573 [ # # # # : 0 : MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
# # # # ]
9574 : : attr->transfer ? "FDB" : "NIC", attr->egress ? "egress" : "ingress",
9575 : : attr->group, idx);
9576 : :
9577 : 0 : grp_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
9578 [ # # ]: 0 : if (!grp_data) {
9579 : 0 : rte_flow_error_set(error, ENOMEM,
9580 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9581 : : NULL,
9582 : : "cannot allocate flow table data entry");
9583 : 0 : return NULL;
9584 : : }
9585 : 0 : dr_tbl_attr.level = attr->group;
9586 : 0 : dr_tbl_attr.type = get_mlx5dr_table_type(attr, *specialize, unified_fdb);
9587 : 0 : tbl = mlx5dr_table_create(priv->dr_ctx, &dr_tbl_attr);
9588 [ # # ]: 0 : if (!tbl)
9589 : 0 : goto error;
9590 : 0 : grp_data->tbl = tbl;
9591 [ # # ]: 0 : if (attr->group) {
9592 : 0 : hws_flags = mlx5_hw_act_dest_table_flag[dr_tbl_attr.type];
9593 : : /* For case of jump from FDB Tx to FDB Rx as it is supported now. */
9594 [ # # # # ]: 0 : if (priv->jump_fdb_rx_en &&
9595 : : dr_tbl_attr.type == MLX5DR_TABLE_TYPE_FDB_RX)
9596 : 0 : hws_flags |= MLX5DR_ACTION_FLAG_HWS_FDB_TX;
9597 : : /* Jump action be used by non-root table. */
9598 : 0 : jump = mlx5dr_action_create_dest_table
9599 : : (priv->dr_ctx, tbl,
9600 : : hws_flags);
9601 [ # # ]: 0 : if (!jump)
9602 : 0 : goto error;
9603 : 0 : grp_data->jump.hws_action = jump;
9604 : : /* Jump action be used by root table. */
9605 : 0 : jump = mlx5dr_action_create_dest_table
9606 : : (priv->dr_ctx, tbl,
9607 : : mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_ROOT]
9608 : 0 : [dr_tbl_attr.type]);
9609 [ # # ]: 0 : if (!jump)
9610 : 0 : goto error;
9611 : 0 : grp_data->jump.root_action = jump;
9612 : : }
9613 : :
9614 : 0 : grp_data->matchers = mlx5_list_create(matcher_name, sh, true,
9615 : : mlx5_flow_matcher_create_cb,
9616 : : mlx5_flow_matcher_match_cb,
9617 : : mlx5_flow_matcher_remove_cb,
9618 : : mlx5_flow_matcher_clone_cb,
9619 : : mlx5_flow_matcher_clone_free_cb);
9620 : 0 : grp_data->dev = dev;
9621 : 0 : grp_data->idx = idx;
9622 : 0 : grp_data->group_id = attr->group;
9623 : 0 : grp_data->type = dr_tbl_attr.type;
9624 : 0 : return &grp_data->entry;
9625 : 0 : error:
9626 [ # # ]: 0 : if (grp_data->jump.root_action)
9627 : 0 : mlx5dr_action_destroy(grp_data->jump.root_action);
9628 [ # # ]: 0 : if (grp_data->jump.hws_action)
9629 : 0 : mlx5dr_action_destroy(grp_data->jump.hws_action);
9630 [ # # ]: 0 : if (tbl)
9631 : 0 : mlx5dr_table_destroy(tbl);
9632 [ # # ]: 0 : if (idx)
9633 : 0 : mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], idx);
9634 : 0 : rte_flow_error_set(error, ENOMEM,
9635 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9636 : : NULL,
9637 : : "cannot allocate flow dr table");
9638 : 0 : return NULL;
9639 : : }
9640 : :
9641 : : /**
9642 : : * Remove group callback.
9643 : : *
9644 : : * @param[in] tool_ctx
9645 : : * Pointer to the hash list related context.
9646 : : * @param[in] entry
9647 : : * Pointer to the entry to be removed.
9648 : : */
9649 : : void
9650 : 0 : mlx5_flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
9651 : : {
9652 : : struct mlx5_dev_ctx_shared *sh = tool_ctx;
9653 : : struct mlx5_flow_group *grp_data =
9654 : : container_of(entry, struct mlx5_flow_group, entry);
9655 : :
9656 : : MLX5_ASSERT(entry && sh);
9657 : : /* To use the wrapper glue functions instead. */
9658 [ # # ]: 0 : if (grp_data->jump.hws_action)
9659 : 0 : mlx5dr_action_destroy(grp_data->jump.hws_action);
9660 [ # # ]: 0 : if (grp_data->jump.root_action)
9661 : 0 : mlx5dr_action_destroy(grp_data->jump.root_action);
9662 : 0 : mlx5_list_destroy(grp_data->matchers);
9663 : 0 : mlx5dr_table_destroy(grp_data->tbl);
9664 : 0 : mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
9665 : 0 : }
9666 : :
9667 : : /**
9668 : : * Match group callback.
9669 : : *
9670 : : * @param[in] tool_ctx
9671 : : * Pointer to the hash list related context.
9672 : : * @param[in] entry
9673 : : * Pointer to the group to be matched.
9674 : : * @param[in] cb_ctx
9675 : : * Pointer to the group matching context.
9676 : : *
9677 : : * @return
9678 : : * 0 on matched, 1 on miss matched.
9679 : : */
9680 : : int
9681 : 0 : mlx5_flow_hw_grp_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
9682 : : void *cb_ctx)
9683 : : {
9684 : : struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9685 : : struct mlx5_flow_group *grp_data =
9686 : : container_of(entry, struct mlx5_flow_group, entry);
9687 : 0 : struct rte_flow_attr *attr =
9688 : : (struct rte_flow_attr *)ctx->data;
9689 : :
9690 : 0 : return (grp_data->dev != ctx->dev) ||
9691 [ # # ]: 0 : (grp_data->group_id != attr->group) ||
9692 [ # # # # ]: 0 : ((grp_data->type < MLX5DR_TABLE_TYPE_FDB) &&
9693 [ # # ]: 0 : attr->transfer) ||
9694 [ # # ]: 0 : ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_TX) &&
9695 [ # # # # ]: 0 : attr->egress) ||
9696 [ # # ]: 0 : ((grp_data->type != MLX5DR_TABLE_TYPE_NIC_RX) &&
9697 : : attr->ingress);
9698 : : }
9699 : :
9700 : : /**
9701 : : * Clone group entry callback.
9702 : : *
9703 : : * @param[in] tool_ctx
9704 : : * Pointer to the hash list related context.
9705 : : * @param[in] entry
9706 : : * Pointer to the group to be matched.
9707 : : * @param[in] cb_ctx
9708 : : * Pointer to the group matching context.
9709 : : *
9710 : : * @return
 *   Pointer to the cloned group entry on success, NULL otherwise (with rte_flow error set).
9712 : : */
9713 : : struct mlx5_list_entry *
9714 : 0 : mlx5_flow_hw_grp_clone_cb(void *tool_ctx, struct mlx5_list_entry *oentry,
9715 : : void *cb_ctx)
9716 : : {
9717 : : struct mlx5_dev_ctx_shared *sh = tool_ctx;
9718 : : struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9719 : : struct mlx5_flow_group *grp_data;
9720 : : struct mlx5_flow_group *old_grp_data;
9721 : 0 : struct rte_flow_error *error = ctx->error;
9722 : 0 : uint32_t idx = 0;
9723 : :
9724 : 0 : grp_data = mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_HW_GRP], &idx);
9725 [ # # ]: 0 : if (!grp_data) {
9726 : 0 : rte_flow_error_set(error, ENOMEM,
9727 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9728 : : NULL,
9729 : : "cannot allocate flow table data entry");
9730 : 0 : return NULL;
9731 : : }
9732 : : old_grp_data = container_of(oentry, typeof(*old_grp_data), entry);
9733 : : memcpy(grp_data, old_grp_data, sizeof(*grp_data));
9734 : 0 : grp_data->idx = idx;
9735 : 0 : return &grp_data->entry;
9736 : : }
9737 : :
9738 : : /**
9739 : : * Free cloned group entry callback.
9740 : : *
9741 : : * @param[in] tool_ctx
9742 : : * Pointer to the hash list related context.
9743 : : * @param[in] entry
9744 : : * Pointer to the group to be freed.
9745 : : */
9746 : : void
9747 : 0 : mlx5_flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
9748 : : {
9749 : : struct mlx5_dev_ctx_shared *sh = tool_ctx;
9750 : : struct mlx5_flow_group *grp_data =
9751 : : container_of(entry, struct mlx5_flow_group, entry);
9752 : :
9753 : 0 : mlx5_ipool_free(sh->ipool[MLX5_IPOOL_HW_GRP], grp_data->idx);
9754 : 0 : }
9755 : :
9756 : : /**
9757 : : * Create and cache a vport action for given @p dev port. vport actions
9758 : : * cache is used in HWS with FDB flows.
9759 : : *
9760 : : * This function does not create any function if proxy port for @p dev port
9761 : : * was not configured for HW Steering.
9762 : : *
9763 : : * This function assumes that E-Switch is enabled and PMD is running with
9764 : : * HW Steering configured.
9765 : : *
9766 : : * @param dev
9767 : : * Pointer to Ethernet device which will be the action destination.
9768 : : *
9769 : : * @return
 *   0 on success, a negative errno value otherwise.
9771 : : */
9772 : : int
9773 : 0 : mlx5_flow_hw_create_vport_action(struct rte_eth_dev *dev)
9774 : : {
9775 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9776 : : struct rte_eth_dev *proxy_dev;
9777 : : struct mlx5_priv *proxy_priv;
9778 : 0 : uint16_t port_id = dev->data->port_id;
9779 : 0 : uint16_t proxy_port_id = port_id;
9780 : : int ret;
9781 : :
9782 : 0 : ret = mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL);
9783 [ # # ]: 0 : if (ret)
9784 : : return ret;
9785 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
9786 : 0 : proxy_priv = proxy_dev->data->dev_private;
9787 [ # # ]: 0 : if (!proxy_priv->hw_vport)
9788 : : return 0;
9789 [ # # ]: 0 : if (proxy_priv->hw_vport[port_id]) {
9790 : 0 : DRV_LOG(ERR, "port %u HWS vport action already created",
9791 : : port_id);
9792 : 0 : return -EINVAL;
9793 : : }
9794 [ # # ]: 0 : proxy_priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
9795 : : (proxy_priv->dr_ctx, priv->dev_port,
9796 : : is_unified_fdb(priv) ?
9797 : : (MLX5DR_ACTION_FLAG_HWS_FDB_RX |
9798 : : MLX5DR_ACTION_FLAG_HWS_FDB_TX |
9799 : : MLX5DR_ACTION_FLAG_HWS_FDB_UNIFIED) :
9800 : : MLX5DR_ACTION_FLAG_HWS_FDB);
9801 [ # # ]: 0 : if (!proxy_priv->hw_vport[port_id]) {
9802 : 0 : DRV_LOG(ERR, "port %u unable to create HWS vport action",
9803 : : port_id);
9804 : 0 : return -EINVAL;
9805 : : }
9806 : : return 0;
9807 : : }
9808 : :
9809 : : /**
9810 : : * Destroys the vport action associated with @p dev device
9811 : : * from actions' cache.
9812 : : *
9813 : : * This function does not destroy any action if there is no action cached
9814 : : * for @p dev or proxy port was not configured for HW Steering.
9815 : : *
9816 : : * This function assumes that E-Switch is enabled and PMD is running with
9817 : : * HW Steering configured.
9818 : : *
9819 : : * @param dev
9820 : : * Pointer to Ethernet device which will be the action destination.
9821 : : */
9822 : : void
9823 : 0 : mlx5_flow_hw_destroy_vport_action(struct rte_eth_dev *dev)
9824 : : {
9825 : : struct rte_eth_dev *proxy_dev;
9826 : : struct mlx5_priv *proxy_priv;
9827 : 0 : uint16_t port_id = dev->data->port_id;
9828 : 0 : uint16_t proxy_port_id = port_id;
9829 : :
9830 [ # # ]: 0 : if (mlx5_flow_pick_transfer_proxy(dev, &proxy_port_id, NULL))
9831 : 0 : return;
9832 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
9833 : 0 : proxy_priv = proxy_dev->data->dev_private;
9834 [ # # # # ]: 0 : if (!proxy_priv->hw_vport || !proxy_priv->hw_vport[port_id])
9835 : : return;
9836 : 0 : mlx5dr_action_destroy(proxy_priv->hw_vport[port_id]);
9837 : 0 : proxy_priv->hw_vport[port_id] = NULL;
9838 : : }
9839 : :
/*
 * Create a destination-vport action for every port sharing this port's
 * switch domain and cache them in priv->hw_vport, indexed by port ID.
 *
 * On mid-loop failure the already-created actions are NOT released here;
 * the caller is expected to invoke flow_hw_free_vport_actions() for cleanup.
 *
 * Returns 0 on success, -ENOMEM or -EINVAL otherwise.
 */
static int
flow_hw_create_vport_actions(struct mlx5_priv *priv)
{
	uint16_t port_id;

	MLX5_ASSERT(!priv->hw_vport);
	bool unified_fdb = is_unified_fdb(priv);
	/* One slot per possible ethdev port; unrelated ports stay NULL. */
	priv->hw_vport = mlx5_malloc(MLX5_MEM_ZERO,
				     sizeof(*priv->hw_vport) * RTE_MAX_ETHPORTS,
				     0, SOCKET_ID_ANY);
	if (!priv->hw_vport)
		return -ENOMEM;
	DRV_LOG(DEBUG, "port %u :: creating vport actions", priv->dev_data->port_id);
	DRV_LOG(DEBUG, "port %u :: domain_id=%u", priv->dev_data->port_id, priv->domain_id);
	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
		struct mlx5_priv *port_priv = rte_eth_devices[port_id].data->dev_private;

		/* Only ports of the same E-Switch domain get an action. */
		if (!port_priv ||
		    port_priv->domain_id != priv->domain_id)
			continue;
		DRV_LOG(DEBUG, "port %u :: for port_id=%u, calling mlx5dr_action_create_dest_vport() with ibport=%u",
			priv->dev_data->port_id, port_id, port_priv->dev_port);
		priv->hw_vport[port_id] = mlx5dr_action_create_dest_vport
				(priv->dr_ctx, port_priv->dev_port,
				 unified_fdb ?
				 (MLX5DR_ACTION_FLAG_HWS_FDB_RX |
				  MLX5DR_ACTION_FLAG_HWS_FDB_TX |
				  MLX5DR_ACTION_FLAG_HWS_FDB_UNIFIED) :
				 MLX5DR_ACTION_FLAG_HWS_FDB);
		DRV_LOG(DEBUG, "port %u :: priv->hw_vport[%u]=%p",
			priv->dev_data->port_id, port_id, (void *)priv->hw_vport[port_id]);
		if (!priv->hw_vport[port_id])
			return -EINVAL;
	}
	return 0;
}
9876 : :
9877 : : static void
9878 : 0 : flow_hw_free_vport_actions(struct mlx5_priv *priv)
9879 : : {
9880 : : uint16_t port_id;
9881 : :
9882 [ # # ]: 0 : if (!priv->hw_vport)
9883 : : return;
9884 [ # # ]: 0 : for (port_id = 0; port_id < RTE_MAX_ETHPORTS; ++port_id)
9885 [ # # ]: 0 : if (priv->hw_vport[port_id])
9886 : 0 : mlx5dr_action_destroy(priv->hw_vport[port_id]);
9887 : 0 : mlx5_free(priv->hw_vport);
9888 : 0 : priv->hw_vport = NULL;
9889 : : }
9890 : :
9891 : : static bool
9892 : : flow_hw_should_create_nat64_actions(struct mlx5_priv *priv)
9893 : : {
9894 : : int i;
9895 : :
9896 : : /* Check if all registers are available. */
9897 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_NAT64_REGS_MAX; ++i)
9898 [ # # ]: 0 : if (priv->sh->registers.nat64_regs[i] == REG_NON)
9899 : : return false;
9900 : :
9901 : : return true;
9902 : : }
9903 : :
9904 : : /**
9905 : : * Create an egress pattern template matching on source SQ.
9906 : : *
9907 : : * @param dev
9908 : : * Pointer to Ethernet device.
9909 : : * @param[out] error
9910 : : * Pointer to error structure.
9911 : : *
9912 : : * @return
9913 : : * Pointer to pattern template on success. NULL otherwise, and rte_errno is set.
9914 : : */
9915 : : static struct rte_flow_pattern_template *
9916 : 0 : flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev, struct rte_flow_error *error)
9917 : : {
9918 : 0 : struct rte_flow_pattern_template_attr attr = {
9919 : : .relaxed_matching = 0,
9920 : : .egress = 1,
9921 : : };
9922 : 0 : struct mlx5_rte_flow_item_sq sq_mask = {
9923 : : .queue = UINT32_MAX,
9924 : : };
9925 : 0 : struct rte_flow_item items[] = {
9926 : : {
9927 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
9928 : : .mask = &sq_mask,
9929 : : },
9930 : : {
9931 : : .type = RTE_FLOW_ITEM_TYPE_END,
9932 : : },
9933 : : };
9934 : :
9935 : 0 : return flow_hw_pattern_template_create(dev, &attr, items, false, error);
9936 : : }
9937 : :
9938 : : static __rte_always_inline uint32_t
9939 : : flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev)
9940 : : {
9941 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9942 : 0 : uint32_t mask = priv->sh->dv_regc0_mask;
9943 : :
9944 : : /* Mask is verified during device initialization. Sanity checking here. */
9945 : : MLX5_ASSERT(mask != 0);
9946 : : /*
9947 : : * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
9948 : : * Sanity checking here.
9949 : : */
9950 : : MLX5_ASSERT(rte_popcount32(mask) >= rte_popcount32(priv->vport_meta_mask));
9951 : : return mask;
9952 : : }
9953 : :
9954 : : static __rte_always_inline uint32_t
9955 : : flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev)
9956 : : {
9957 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
9958 : : uint32_t tag;
9959 : :
9960 : : /* Mask is verified during device initialization. Sanity checking here. */
9961 : : MLX5_ASSERT(priv->vport_meta_mask != 0);
9962 [ # # ]: 0 : tag = priv->vport_meta_tag >> (rte_bsf32(priv->vport_meta_mask));
9963 : : /*
9964 : : * Availability of sufficient number of bits in REG_C_0 is verified on initialization.
9965 : : * Sanity checking here.
9966 : : */
9967 : : MLX5_ASSERT((tag & priv->sh->dv_regc0_mask) == tag);
9968 : : return tag;
9969 : : }
9970 : :
9971 : : static void
9972 : : flow_hw_update_action_mask(struct rte_flow_action *action,
9973 : : struct rte_flow_action *mask,
9974 : : enum rte_flow_action_type type,
9975 : : void *conf_v,
9976 : : void *conf_m)
9977 : : {
9978 : 0 : action->type = type;
9979 : 0 : action->conf = conf_v;
9980 : 0 : mask->type = type;
9981 : 0 : mask->conf = conf_m;
9982 : : }
9983 : :
9984 : : /**
9985 : : * Create an egress actions template with MODIFY_FIELD action for setting unused REG_C_0 bits
9986 : : * to vport tag and JUMP action to group 1.
9987 : : *
9988 : : * If extended metadata mode is enabled, then MODIFY_FIELD action for copying software metadata
9989 : : * to REG_C_1 is added as well.
9990 : : *
9991 : : * @param dev
9992 : : * Pointer to Ethernet device.
9993 : : * @param[out] error
9994 : : * Pointer to error structure.
9995 : : *
9996 : : * @return
9997 : : * Pointer to actions template on success. NULL otherwise, and rte_errno is set.
9998 : : */
static struct rte_flow_actions_template *
flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev,
					  struct rte_flow_error *error)
{
	uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev);
	uint32_t tag_value = flow_hw_tx_tag_regc_value(dev);
	struct rte_flow_actions_template_attr attr = {
		.egress = 1,
	};
	/* Value: write the vport tag into the unused REG_C_0 bits. */
	struct rte_flow_action_modify_field set_tag_v = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.tag_index = REG_C_0,
			.offset = rte_bsf32(tag_mask),
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
		},
		.width = rte_popcount32(tag_mask),
	};
	/* Mask: all-ones marks every field of the modify action as fixed. */
	struct rte_flow_action_modify_field set_tag_m = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.level = UINT8_MAX,
			.tag_index = UINT8_MAX,
			.offset = UINT32_MAX,
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
		},
		.width = UINT32_MAX,
	};
	/* Value: copy software metadata (REG_A) into REG_C_1 (extended meta mode). */
	struct rte_flow_action_modify_field copy_metadata_v = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.tag_index = REG_C_1,
		},
		.src = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.tag_index = REG_A,
		},
		.width = 32,
	};
	struct rte_flow_action_modify_field copy_metadata_m = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.level = UINT8_MAX,
			.tag_index = UINT8_MAX,
			.offset = UINT32_MAX,
		},
		.src = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.level = UINT8_MAX,
			.tag_index = UINT8_MAX,
			.offset = UINT32_MAX,
		},
		.width = UINT32_MAX,
	};
	struct rte_flow_action_jump jump_v = {
		.group = MLX5_HW_LOWEST_USABLE_GROUP,
	};
	struct rte_flow_action_jump jump_m = {
		.group = UINT32_MAX,
	};
	/* 4 slots: set tag, (optional) metadata copy, jump, END. */
	struct rte_flow_action actions_v[4] = { { 0 } };
	struct rte_flow_action actions_m[4] = { { 0 } };
	unsigned int idx = 0;

	rte_memcpy(set_tag_v.src.value, &tag_value, sizeof(tag_value));
	rte_memcpy(set_tag_m.src.value, &tag_mask, sizeof(tag_mask));
	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
				   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
				   &set_tag_v, &set_tag_m);
	idx++;
	/* Metadata copy is only needed with 32-bit extended metadata mode. */
	if (MLX5_SH(dev)->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
		flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx],
					   RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
					   &copy_metadata_v, &copy_metadata_m);
		idx++;
	}
	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_JUMP,
				   &jump_v, &jump_m);
	idx++;
	flow_hw_update_action_mask(&actions_v[idx], &actions_m[idx], RTE_FLOW_ACTION_TYPE_END,
				   NULL, NULL);
	idx++;
	MLX5_ASSERT(idx <= RTE_DIM(actions_v));
	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
}
10092 : :
10093 : : static void
10094 : 0 : flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev)
10095 : : {
10096 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10097 : :
10098 [ # # ]: 0 : if (priv->hw_tx_repr_tagging_tbl) {
10099 : 0 : flow_hw_table_destroy(dev, priv->hw_tx_repr_tagging_tbl, NULL);
10100 : 0 : priv->hw_tx_repr_tagging_tbl = NULL;
10101 : : }
10102 [ # # ]: 0 : if (priv->hw_tx_repr_tagging_at) {
10103 : 0 : flow_hw_actions_template_destroy(dev, priv->hw_tx_repr_tagging_at, NULL);
10104 : 0 : priv->hw_tx_repr_tagging_at = NULL;
10105 : : }
10106 [ # # ]: 0 : if (priv->hw_tx_repr_tagging_pt) {
10107 : 0 : flow_hw_pattern_template_destroy(dev, priv->hw_tx_repr_tagging_pt, NULL);
10108 : 0 : priv->hw_tx_repr_tagging_pt = NULL;
10109 : : }
10110 : 0 : }
10111 : :
10112 : : /**
10113 : : * Setup templates and table used to create default Tx flow rules. These default rules
10114 : : * allow for matching Tx representor traffic using a vport tag placed in unused bits of
10115 : : * REG_C_0 register.
10116 : : *
10117 : : * @param dev
10118 : : * Pointer to Ethernet device.
10119 : : * @param[out] error
10120 : : * Pointer to error structure.
10121 : : *
10122 : : * @return
10123 : : * 0 on success, negative errno value otherwise.
10124 : : */
10125 : : static int
10126 : 0 : flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev, struct rte_flow_error *error)
10127 : : {
10128 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10129 : 0 : struct rte_flow_template_table_attr attr = {
10130 : : .flow_attr = {
10131 : : .group = 0,
10132 : : .priority = MLX5_HW_LOWEST_PRIO_ROOT,
10133 : : .egress = 1,
10134 : : },
10135 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10136 : : };
10137 : 0 : struct mlx5_flow_template_table_cfg cfg = {
10138 : : .attr = attr,
10139 : : .external = false,
10140 : : };
10141 : :
10142 : : MLX5_ASSERT(priv->sh->config.dv_esw_en);
10143 : 0 : priv->hw_tx_repr_tagging_pt =
10144 : 0 : flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error);
10145 [ # # ]: 0 : if (!priv->hw_tx_repr_tagging_pt)
10146 : 0 : goto err;
10147 : 0 : priv->hw_tx_repr_tagging_at =
10148 : 0 : flow_hw_create_tx_repr_tag_jump_acts_tmpl(dev, error);
10149 [ # # ]: 0 : if (!priv->hw_tx_repr_tagging_at)
10150 : 0 : goto err;
10151 : 0 : priv->hw_tx_repr_tagging_tbl = flow_hw_table_create(dev, &cfg,
10152 : : &priv->hw_tx_repr_tagging_pt, 1,
10153 : : &priv->hw_tx_repr_tagging_at, 1,
10154 : : error);
10155 [ # # ]: 0 : if (!priv->hw_tx_repr_tagging_tbl)
10156 : 0 : goto err;
10157 : : return 0;
10158 : 0 : err:
10159 : 0 : flow_hw_cleanup_tx_repr_tagging(dev);
10160 : 0 : return -rte_errno;
10161 : : }
10162 : :
10163 : : static uint32_t
10164 : : flow_hw_esw_mgr_regc_marker_mask(struct rte_eth_dev *dev)
10165 : : {
10166 : 0 : uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
10167 : :
10168 : : /* Mask is verified during device initialization. */
10169 : : MLX5_ASSERT(mask != 0);
10170 : : return mask;
10171 : : }
10172 : :
10173 : : static uint32_t
10174 : : flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev)
10175 : : {
10176 : 0 : uint32_t mask = MLX5_SH(dev)->dv_regc0_mask;
10177 : :
10178 : : /* Mask is verified during device initialization. */
10179 : : MLX5_ASSERT(mask != 0);
10180 : 0 : return RTE_BIT32(rte_bsf32(mask));
10181 : : }
10182 : :
10183 : : /**
10184 : : * Creates a flow pattern template used to match on E-Switch Manager.
10185 : : * This template is used to set up a table for SQ miss default flow.
10186 : : *
10187 : : * @param dev
10188 : : * Pointer to Ethernet device.
10189 : : * @param error
10190 : : * Pointer to error structure.
10191 : : *
10192 : : * @return
10193 : : * Pointer to flow pattern template on success, NULL otherwise.
10194 : : */
static struct rte_flow_pattern_template *
flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev,
					     struct rte_flow_error *error)
{
	struct rte_flow_pattern_template_attr attr = {
		.relaxed_matching = 0,
		.transfer = 1,
	};
	/* Match traffic originated by the E-Switch Manager only. */
	struct rte_flow_item_ethdev port_spec = {
		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
	};
	struct rte_flow_item_ethdev port_mask = {
		.port_id = UINT16_MAX,
	};
	/* Match on the full SQ number. */
	struct mlx5_rte_flow_item_sq sq_mask = {
		.queue = UINT32_MAX,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
			.spec = &port_spec,
			.mask = &port_mask,
		},
		{
			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
			.mask = &sq_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};

	return flow_hw_pattern_template_create(dev, &attr, items, false, error);
}
10229 : :
10230 : : /**
10231 : : * Creates a flow pattern template used to match REG_C_0 and a SQ.
10232 : : * Matching on REG_C_0 is set up to match on all bits usable by user-space.
10233 : : * If traffic was sent from E-Switch Manager, then all usable bits will be set to 0,
10234 : : * except the least significant bit, which will be set to 1.
10235 : : *
10236 : : * This template is used to set up a table for SQ miss default flow.
10237 : : *
10238 : : * @param dev
10239 : : * Pointer to Ethernet device.
10240 : : * @param error
10241 : : * Pointer to error structure.
10242 : : *
10243 : : * @return
10244 : : * Pointer to flow pattern template on success, NULL otherwise.
10245 : : */
static struct rte_flow_pattern_template *
flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev,
					     struct rte_flow_error *error)
{
	struct rte_flow_pattern_template_attr attr = {
		.relaxed_matching = 0,
		.transfer = 1,
	};
	/* Match on the user-space-usable bits of REG_C_0. */
	struct rte_flow_item_tag reg_c0_spec = {
		.index = (uint8_t)REG_C_0,
	};
	struct rte_flow_item_tag reg_c0_mask = {
		.index = 0xff,
		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
	};
	/* Match on the full SQ number. */
	struct mlx5_rte_flow_item_sq queue_mask = {
		.queue = UINT32_MAX,
	};
	struct rte_flow_item items[] = {
		{
			.type = (enum rte_flow_item_type)
				MLX5_RTE_FLOW_ITEM_TYPE_TAG,
			.spec = &reg_c0_spec,
			.mask = &reg_c0_mask,
		},
		{
			.type = (enum rte_flow_item_type)
				MLX5_RTE_FLOW_ITEM_TYPE_SQ,
			.mask = &queue_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};

	return flow_hw_pattern_template_create(dev, &attr, items, false, error);
}
10283 : :
10284 : : /**
10285 : : * Creates a flow pattern template with unmasked represented port matching.
10286 : : * This template is used to set up a table for default transfer flows
10287 : : * directing packets to group 1.
10288 : : *
10289 : : * @param dev
10290 : : * Pointer to Ethernet device.
10291 : : * @param error
10292 : : * Pointer to error structure.
10293 : : *
10294 : : * @return
10295 : : * Pointer to flow pattern template on success, NULL otherwise.
10296 : : */
10297 : : static struct rte_flow_pattern_template *
10298 : 0 : flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev,
10299 : : struct rte_flow_error *error)
10300 : : {
10301 : 0 : struct rte_flow_pattern_template_attr attr = {
10302 : : .relaxed_matching = 0,
10303 : : .transfer = 1,
10304 : : };
10305 : 0 : struct rte_flow_item_ethdev port_mask = {
10306 : : .port_id = UINT16_MAX,
10307 : : };
10308 : 0 : struct rte_flow_item items[] = {
10309 : : {
10310 : : .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
10311 : : .mask = &port_mask,
10312 : : },
10313 : : {
10314 : : .type = RTE_FLOW_ITEM_TYPE_END,
10315 : : },
10316 : : };
10317 : :
10318 : 0 : return flow_hw_pattern_template_create(dev, &attr, items, false, error);
10319 : : }
10320 : :
10321 : : /*
10322 : : * Creating a flow pattern template with all LACP packets matching, only for NIC
10323 : : * ingress domain.
10324 : : *
10325 : : * @param dev
10326 : : * Pointer to Ethernet device.
10327 : : * @param error
10328 : : * Pointer to error structure.
10329 : : *
10330 : : * @return
10331 : : * Pointer to flow pattern template on success, NULL otherwise.
10332 : : */
10333 : : static struct rte_flow_pattern_template *
10334 : 0 : flow_hw_create_lacp_rx_pattern_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
10335 : : {
10336 : 0 : struct rte_flow_pattern_template_attr pa_attr = {
10337 : : .relaxed_matching = 0,
10338 : : .ingress = 1,
10339 : : };
10340 : 0 : struct rte_flow_item_eth lacp_mask = {
10341 : : .dst.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10342 : : .src.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
10343 : : .type = 0xFFFF,
10344 : : };
10345 : 0 : struct rte_flow_item eth_all[] = {
10346 : : [0] = {
10347 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
10348 : : .mask = &lacp_mask,
10349 : : },
10350 : : [1] = {
10351 : : .type = RTE_FLOW_ITEM_TYPE_END,
10352 : : },
10353 : : };
10354 : 0 : return flow_hw_pattern_template_create(dev, &pa_attr, eth_all,
10355 : : false, error);
10356 : : }
10357 : :
10358 : : /**
10359 : : * Creates a flow actions template with modify field action and masked jump action.
10360 : : * Modify field action sets the least significant bit of REG_C_0 (usable by user-space)
10361 : : * to 1, meaning that packet was originated from E-Switch Manager. Jump action
10362 : : * transfers steering to group 1.
10363 : : *
10364 : : * @param dev
10365 : : * Pointer to Ethernet device.
10366 : : * @param error
10367 : : * Pointer to error structure.
10368 : : *
10369 : : * @return
10370 : : * Pointer to flow actions template on success, NULL otherwise.
10371 : : */
static struct rte_flow_actions_template *
flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev,
					       struct rte_flow_error *error)
{
	uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev);
	uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev);
	struct rte_flow_actions_template_attr attr = {
		.transfer = 1,
	};
	/* Value: set the E-Switch Manager marker bit(s) in REG_C_0. */
	struct rte_flow_action_modify_field set_reg_v = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.tag_index = REG_C_0,
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
		},
		.width = rte_popcount32(marker_mask),
	};
	/* Mask: all-ones marks every field of the modify action as fixed. */
	struct rte_flow_action_modify_field set_reg_m = {
		.operation = RTE_FLOW_MODIFY_SET,
		.dst = {
			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
			.level = UINT8_MAX,
			.tag_index = UINT8_MAX,
			.offset = UINT32_MAX,
		},
		.src = {
			.field = RTE_FLOW_FIELD_VALUE,
		},
		.width = UINT32_MAX,
	};
	/* Jump target group is fixed (masked) for all flows of the template. */
	struct rte_flow_action_jump jump_v = {
		.group = MLX5_HW_LOWEST_USABLE_GROUP,
	};
	struct rte_flow_action_jump jump_m = {
		.group = UINT32_MAX,
	};
	struct rte_flow_action actions_v[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
			.conf = &set_reg_v,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_JUMP,
			.conf = &jump_v,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};
	struct rte_flow_action actions_m[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
			.conf = &set_reg_m,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_JUMP,
			.conf = &jump_m,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};

	/* Place the marker at the lowest usable bit of the REG_C_0 mask. */
	set_reg_v.dst.offset = rte_bsf32(marker_mask);
	rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits));
	rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask));
	return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
}
10443 : :
10444 : : /**
10445 : : * Creates a flow actions template with an unmasked JUMP action. Flows
10446 : : * based on this template will perform a jump to some group. This template
10447 : : * is used to set up tables for control flows.
10448 : : *
10449 : : * @param dev
10450 : : * Pointer to Ethernet device.
10451 : : * @param group
10452 : : * Destination group for this action template.
10453 : : * @param error
10454 : : * Pointer to error structure.
10455 : : *
10456 : : * @return
10457 : : * Pointer to flow actions template on success, NULL otherwise.
10458 : : */
10459 : : static struct rte_flow_actions_template *
10460 : 0 : flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev,
10461 : : uint32_t group,
10462 : : struct rte_flow_error *error)
10463 : : {
10464 : 0 : struct rte_flow_actions_template_attr attr = {
10465 : : .transfer = 1,
10466 : : };
10467 : 0 : struct rte_flow_action_jump jump_v = {
10468 : : .group = group,
10469 : : };
10470 : 0 : struct rte_flow_action_jump jump_m = {
10471 : : .group = UINT32_MAX,
10472 : : };
10473 : 0 : struct rte_flow_action actions_v[] = {
10474 : : {
10475 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
10476 : : .conf = &jump_v,
10477 : : },
10478 : : {
10479 : : .type = RTE_FLOW_ACTION_TYPE_END,
10480 : : }
10481 : : };
10482 : 0 : struct rte_flow_action actions_m[] = {
10483 : : {
10484 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
10485 : : .conf = &jump_m,
10486 : : },
10487 : : {
10488 : : .type = RTE_FLOW_ACTION_TYPE_END,
10489 : : }
10490 : : };
10491 : :
10492 : 0 : return flow_hw_actions_template_create(dev, &attr, actions_v,
10493 : : actions_m, error);
10494 : : }
10495 : :
10496 : : /**
10497 : : * Creates a flow action template with a unmasked REPRESENTED_PORT action.
10498 : : * It is used to create control flow tables.
10499 : : *
10500 : : * @param dev
10501 : : * Pointer to Ethernet device.
10502 : : * @param error
10503 : : * Pointer to error structure.
10504 : : *
10505 : : * @return
10506 : : * Pointer to flow action template on success, NULL otherwise.
10507 : : */
10508 : : static struct rte_flow_actions_template *
10509 : 0 : flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev,
10510 : : struct rte_flow_error *error)
10511 : : {
10512 : 0 : struct rte_flow_actions_template_attr attr = {
10513 : : .transfer = 1,
10514 : : };
10515 : 0 : struct rte_flow_action_ethdev port_v = {
10516 : : .port_id = 0,
10517 : : };
10518 : 0 : struct rte_flow_action actions_v[] = {
10519 : : {
10520 : : .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
10521 : : .conf = &port_v,
10522 : : },
10523 : : {
10524 : : .type = RTE_FLOW_ACTION_TYPE_END,
10525 : : }
10526 : : };
10527 : 0 : struct rte_flow_action_ethdev port_m = {
10528 : : .port_id = 0,
10529 : : };
10530 : 0 : struct rte_flow_action actions_m[] = {
10531 : : {
10532 : : .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
10533 : : .conf = &port_m,
10534 : : },
10535 : : {
10536 : : .type = RTE_FLOW_ACTION_TYPE_END,
10537 : : }
10538 : : };
10539 : :
10540 : 0 : return flow_hw_actions_template_create(dev, &attr, actions_v, actions_m, error);
10541 : : }
10542 : :
10543 : : /*
10544 : : * Creating an actions template to use header modify action for register
10545 : : * copying. This template is used to set up a table for copy flow.
10546 : : *
10547 : : * @param dev
10548 : : * Pointer to Ethernet device.
10549 : : * @param error
10550 : : * Pointer to error structure.
10551 : : *
10552 : : * @return
10553 : : * Pointer to flow actions template on success, NULL otherwise.
10554 : : */
10555 : : static struct rte_flow_actions_template *
10556 : 0 : flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev,
10557 : : struct rte_flow_error *error)
10558 : : {
10559 : 0 : struct rte_flow_actions_template_attr tx_act_attr = {
10560 : : .egress = 1,
10561 : : };
10562 : 0 : const struct rte_flow_action_modify_field mreg_action = {
10563 : : .operation = RTE_FLOW_MODIFY_SET,
10564 : : .dst = {
10565 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10566 : : .tag_index = REG_C_1,
10567 : : },
10568 : : .src = {
10569 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10570 : : .tag_index = REG_A,
10571 : : },
10572 : : .width = 32,
10573 : : };
10574 : 0 : const struct rte_flow_action_modify_field mreg_mask = {
10575 : : .operation = RTE_FLOW_MODIFY_SET,
10576 : : .dst = {
10577 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10578 : : .level = UINT8_MAX,
10579 : : .tag_index = UINT8_MAX,
10580 : : .offset = UINT32_MAX,
10581 : : },
10582 : : .src = {
10583 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
10584 : : .level = UINT8_MAX,
10585 : : .tag_index = UINT8_MAX,
10586 : : .offset = UINT32_MAX,
10587 : : },
10588 : : .width = UINT32_MAX,
10589 : : };
10590 : 0 : const struct rte_flow_action_jump jump_action = {
10591 : : .group = 1,
10592 : : };
10593 : 0 : const struct rte_flow_action_jump jump_mask = {
10594 : : .group = UINT32_MAX,
10595 : : };
10596 : 0 : const struct rte_flow_action actions[] = {
10597 : : [0] = {
10598 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10599 : : .conf = &mreg_action,
10600 : : },
10601 : : [1] = {
10602 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
10603 : : .conf = &jump_action,
10604 : : },
10605 : : [2] = {
10606 : : .type = RTE_FLOW_ACTION_TYPE_END,
10607 : : },
10608 : : };
10609 : 0 : const struct rte_flow_action masks[] = {
10610 : : [0] = {
10611 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
10612 : : .conf = &mreg_mask,
10613 : : },
10614 : : [1] = {
10615 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
10616 : : .conf = &jump_mask,
10617 : : },
10618 : : [2] = {
10619 : : .type = RTE_FLOW_ACTION_TYPE_END,
10620 : : },
10621 : : };
10622 : :
10623 : 0 : return flow_hw_actions_template_create(dev, &tx_act_attr, actions,
10624 : : masks, error);
10625 : : }
10626 : :
10627 : : /*
10628 : : * Creating an actions template to use default miss to re-route packets to the
10629 : : * kernel driver stack.
10630 : : * On root table, only DEFAULT_MISS action can be used.
10631 : : *
10632 : : * @param dev
10633 : : * Pointer to Ethernet device.
10634 : : * @param error
10635 : : * Pointer to error structure.
10636 : : *
10637 : : * @return
10638 : : * Pointer to flow actions template on success, NULL otherwise.
10639 : : */
10640 : : static struct rte_flow_actions_template *
10641 : 0 : flow_hw_create_lacp_rx_actions_template(struct rte_eth_dev *dev, struct rte_flow_error *error)
10642 : : {
10643 : 0 : struct rte_flow_actions_template_attr act_attr = {
10644 : : .ingress = 1,
10645 : : };
10646 : 0 : const struct rte_flow_action actions[] = {
10647 : : [0] = {
10648 : : .type = (enum rte_flow_action_type)
10649 : : MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
10650 : : },
10651 : : [1] = {
10652 : : .type = RTE_FLOW_ACTION_TYPE_END,
10653 : : },
10654 : : };
10655 : :
10656 : 0 : return flow_hw_actions_template_create(dev, &act_attr, actions, actions, error);
10657 : : }
10658 : :
10659 : : /**
10660 : : * Creates a control flow table used to transfer traffic from E-Switch Manager
10661 : : * and TX queues from group 0 to group 1.
10662 : : *
10663 : : * @param dev
10664 : : * Pointer to Ethernet device.
10665 : : * @param it
10666 : : * Pointer to flow pattern template.
10667 : : * @param at
10668 : : * Pointer to flow actions template.
10669 : : * @param error
10670 : : * Pointer to error structure.
10671 : : *
10672 : : * @return
10673 : : * Pointer to flow table on success, NULL otherwise.
10674 : : */
10675 : : static struct rte_flow_template_table*
10676 : 0 : flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev,
10677 : : struct rte_flow_pattern_template *it,
10678 : : struct rte_flow_actions_template *at,
10679 : : struct rte_flow_error *error)
10680 : : {
10681 : 0 : struct rte_flow_template_table_attr attr = {
10682 : : .flow_attr = {
10683 : : .group = 0,
10684 : : .priority = MLX5_HW_LOWEST_PRIO_ROOT,
10685 : : .ingress = 0,
10686 : : .egress = 0,
10687 : : .transfer = 1,
10688 : : },
10689 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10690 : : };
10691 : 0 : struct mlx5_flow_template_table_cfg cfg = {
10692 : : .attr = attr,
10693 : : .external = false,
10694 : : };
10695 : :
10696 : 0 : return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10697 : : }
10698 : :
10699 : :
10700 : : /**
10701 : : * Creates a control flow table used to transfer traffic from E-Switch Manager
10702 : : * and TX queues from group 0 to group 1.
10703 : : *
10704 : : * @param dev
10705 : : * Pointer to Ethernet device.
10706 : : * @param it
10707 : : * Pointer to flow pattern template.
10708 : : * @param at
10709 : : * Pointer to flow actions template.
10710 : : * @param error
10711 : : * Pointer to error structure.
10712 : : *
10713 : : * @return
10714 : : * Pointer to flow table on success, NULL otherwise.
10715 : : */
10716 : : static struct rte_flow_template_table*
10717 : 0 : flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev,
10718 : : struct rte_flow_pattern_template *it,
10719 : : struct rte_flow_actions_template *at,
10720 : : struct rte_flow_error *error)
10721 : : {
10722 : 0 : struct rte_flow_template_table_attr attr = {
10723 : : .flow_attr = {
10724 : : .group = 1,
10725 : : .priority = MLX5_HW_LOWEST_PRIO_NON_ROOT,
10726 : : .ingress = 0,
10727 : : .egress = 0,
10728 : : .transfer = 1,
10729 : : },
10730 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10731 : : };
10732 : 0 : struct mlx5_flow_template_table_cfg cfg = {
10733 : : .attr = attr,
10734 : : .external = false,
10735 : : };
10736 : :
10737 : 0 : return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10738 : : }
10739 : :
10740 : : /*
10741 : : * Creating the default Tx metadata copy table on NIC Tx group 0.
10742 : : *
10743 : : * @param dev
10744 : : * Pointer to Ethernet device.
10745 : : * @param pt
10746 : : * Pointer to flow pattern template.
10747 : : * @param at
10748 : : * Pointer to flow actions template.
10749 : : * @param error
10750 : : * Pointer to error structure.
10751 : : *
10752 : : * @return
10753 : : * Pointer to flow table on success, NULL otherwise.
10754 : : */
10755 : : static struct rte_flow_template_table*
10756 : 0 : flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
10757 : : struct rte_flow_pattern_template *pt,
10758 : : struct rte_flow_actions_template *at,
10759 : : struct rte_flow_error *error)
10760 : : {
10761 : 0 : struct rte_flow_template_table_attr tx_tbl_attr = {
10762 : : .flow_attr = {
10763 : : .group = 0, /* Root */
10764 : : .priority = MLX5_HW_LOWEST_PRIO_ROOT,
10765 : : .egress = 1,
10766 : : },
10767 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10768 : : };
10769 : 0 : struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
10770 : : .attr = tx_tbl_attr,
10771 : : .external = false,
10772 : : };
10773 : :
10774 : 0 : return flow_hw_table_create(dev, &tx_tbl_cfg, &pt, 1, &at, 1, error);
10775 : : }
10776 : :
10777 : : /**
10778 : : * Creates a control flow table used to transfer traffic
10779 : : * from group 0 to group 1.
10780 : : *
10781 : : * @param dev
10782 : : * Pointer to Ethernet device.
10783 : : * @param it
10784 : : * Pointer to flow pattern template.
10785 : : * @param at
10786 : : * Pointer to flow actions template.
10787 : : * @param error
10788 : : * Pointer to error structure.
10789 : : *
10790 : : * @return
10791 : : * Pointer to flow table on success, NULL otherwise.
10792 : : */
10793 : : static struct rte_flow_template_table *
10794 : 0 : flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev,
10795 : : struct rte_flow_pattern_template *it,
10796 : : struct rte_flow_actions_template *at,
10797 : : struct rte_flow_error *error)
10798 : : {
10799 : 0 : struct rte_flow_template_table_attr attr = {
10800 : : .flow_attr = {
10801 : : .group = 0,
10802 : : .priority = 0,
10803 : : .ingress = 0,
10804 : : .egress = 0,
10805 : : .transfer = 1,
10806 : : },
10807 : : .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
10808 : : };
10809 : 0 : struct mlx5_flow_template_table_cfg cfg = {
10810 : : .attr = attr,
10811 : : .external = false,
10812 : : };
10813 : :
10814 : 0 : return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10815 : : }
10816 : :
10817 : : /**
10818 : : * Cleans up all template tables and pattern, and actions templates used for
10819 : : * FDB control flow rules.
10820 : : *
10821 : : * @param dev
10822 : : * Pointer to Ethernet device.
10823 : : */
10824 : : static void
10825 : 0 : flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev)
10826 : : {
10827 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10828 : : struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
10829 : :
10830 [ # # ]: 0 : if (!priv->hw_ctrl_fdb)
10831 : : return;
10832 : : hw_ctrl_fdb = priv->hw_ctrl_fdb;
10833 : : /* Clean up templates used for LACP default miss table. */
10834 [ # # ]: 0 : if (hw_ctrl_fdb->hw_lacp_rx_tbl)
10835 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL));
10836 [ # # ]: 0 : if (hw_ctrl_fdb->lacp_rx_actions_tmpl)
10837 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl,
10838 : : NULL));
10839 [ # # ]: 0 : if (hw_ctrl_fdb->lacp_rx_items_tmpl)
10840 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
10841 : : NULL));
10842 : : /* Clean up templates used for default FDB jump rule. */
10843 [ # # ]: 0 : if (hw_ctrl_fdb->hw_esw_zero_tbl)
10844 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL));
10845 [ # # ]: 0 : if (hw_ctrl_fdb->jump_one_actions_tmpl)
10846 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl,
10847 : : NULL));
10848 [ # # ]: 0 : if (hw_ctrl_fdb->port_items_tmpl)
10849 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl,
10850 : : NULL));
10851 : : /* Clean up templates used for default SQ miss flow rules - non-root table. */
10852 [ # # ]: 0 : if (hw_ctrl_fdb->hw_esw_sq_miss_tbl)
10853 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL));
10854 [ # # ]: 0 : if (hw_ctrl_fdb->regc_sq_items_tmpl)
10855 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl,
10856 : : NULL));
10857 [ # # ]: 0 : if (hw_ctrl_fdb->port_actions_tmpl)
10858 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl,
10859 : : NULL));
10860 : : /* Clean up templates used for default SQ miss flow rules - root table. */
10861 [ # # ]: 0 : if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl)
10862 : 0 : claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL));
10863 [ # # ]: 0 : if (hw_ctrl_fdb->regc_jump_actions_tmpl)
10864 : 0 : claim_zero(flow_hw_actions_template_destroy(dev,
10865 : : hw_ctrl_fdb->regc_jump_actions_tmpl, NULL));
10866 [ # # ]: 0 : if (hw_ctrl_fdb->esw_mgr_items_tmpl)
10867 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
10868 : : NULL));
10869 : : /* Clean up templates structure for FDB control flow rules. */
10870 : 0 : mlx5_free(hw_ctrl_fdb);
10871 : 0 : priv->hw_ctrl_fdb = NULL;
10872 : : }
10873 : :
10874 : : /*
10875 : : * Create a table on the root group to for the LACP traffic redirecting.
10876 : : *
10877 : : * @param dev
10878 : : * Pointer to Ethernet device.
10879 : : * @param it
10880 : : * Pointer to flow pattern template.
10881 : : * @param at
10882 : : * Pointer to flow actions template.
10883 : : *
10884 : : * @return
10885 : : * Pointer to flow table on success, NULL otherwise.
10886 : : */
10887 : : static struct rte_flow_template_table *
10888 : 0 : flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev,
10889 : : struct rte_flow_pattern_template *it,
10890 : : struct rte_flow_actions_template *at,
10891 : : struct rte_flow_error *error)
10892 : : {
10893 : 0 : struct rte_flow_template_table_attr attr = {
10894 : : .flow_attr = {
10895 : : .group = 0,
10896 : : .priority = 0,
10897 : : .ingress = 1,
10898 : : .egress = 0,
10899 : : .transfer = 0,
10900 : : },
10901 : : .nb_flows = 1,
10902 : : };
10903 : 0 : struct mlx5_flow_template_table_cfg cfg = {
10904 : : .attr = attr,
10905 : : .external = false,
10906 : : };
10907 : :
10908 : 0 : return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error);
10909 : : }
10910 : :
10911 : : /**
10912 : : * Creates a set of flow tables used to create control flows used
10913 : : * when E-Switch is engaged.
10914 : : *
10915 : : * @param dev
10916 : : * Pointer to Ethernet device.
10917 : : * @param error
10918 : : * Pointer to error structure.
10919 : : *
10920 : : * @return
10921 : : * 0 on success, negative values otherwise
10922 : : */
10923 : : static int
10924 : 0 : flow_hw_create_fdb_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
10925 : : {
10926 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
10927 : : struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
10928 : 0 : uint32_t fdb_def_rule = priv->sh->config.fdb_def_rule;
10929 : :
10930 : : MLX5_ASSERT(priv->hw_ctrl_fdb == NULL);
10931 : 0 : hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY);
10932 [ # # ]: 0 : if (!hw_ctrl_fdb) {
10933 : 0 : DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates",
10934 : : dev->data->port_id);
10935 : 0 : rte_errno = ENOMEM;
10936 : 0 : goto err;
10937 : : }
10938 : 0 : priv->hw_ctrl_fdb = hw_ctrl_fdb;
10939 [ # # ]: 0 : if (fdb_def_rule) {
10940 : : /* Create templates and table for default SQ miss flow rules - root table. */
10941 : 0 : hw_ctrl_fdb->esw_mgr_items_tmpl =
10942 : 0 : flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error);
10943 [ # # ]: 0 : if (!hw_ctrl_fdb->esw_mgr_items_tmpl) {
10944 : 0 : DRV_LOG(ERR, "port %u failed to create E-Switch Manager item"
10945 : : " template for control flows", dev->data->port_id);
10946 : 0 : goto err;
10947 : : }
10948 : 0 : hw_ctrl_fdb->regc_jump_actions_tmpl =
10949 : 0 : flow_hw_create_ctrl_regc_jump_actions_template(dev, error);
10950 [ # # ]: 0 : if (!hw_ctrl_fdb->regc_jump_actions_tmpl) {
10951 : 0 : DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template"
10952 : : " for control flows", dev->data->port_id);
10953 : 0 : goto err;
10954 : : }
10955 : 0 : hw_ctrl_fdb->hw_esw_sq_miss_root_tbl =
10956 : 0 : flow_hw_create_ctrl_sq_miss_root_table
10957 : : (dev, hw_ctrl_fdb->esw_mgr_items_tmpl,
10958 : : hw_ctrl_fdb->regc_jump_actions_tmpl, error);
10959 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) {
10960 : 0 : DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)"
10961 : : " for control flows", dev->data->port_id);
10962 : 0 : goto err;
10963 : : }
10964 : : /* Create templates and table for default SQ miss flow rules - non-root table. */
10965 : 0 : hw_ctrl_fdb->regc_sq_items_tmpl =
10966 : 0 : flow_hw_create_ctrl_regc_sq_pattern_template(dev, error);
10967 [ # # ]: 0 : if (!hw_ctrl_fdb->regc_sq_items_tmpl) {
10968 : 0 : DRV_LOG(ERR, "port %u failed to create SQ item template for"
10969 : : " control flows", dev->data->port_id);
10970 : 0 : goto err;
10971 : : }
10972 : 0 : hw_ctrl_fdb->port_actions_tmpl =
10973 : 0 : flow_hw_create_ctrl_port_actions_template(dev, error);
10974 [ # # ]: 0 : if (!hw_ctrl_fdb->port_actions_tmpl) {
10975 : 0 : DRV_LOG(ERR, "port %u failed to create port action template"
10976 : : " for control flows", dev->data->port_id);
10977 : 0 : goto err;
10978 : : }
10979 : 0 : hw_ctrl_fdb->hw_esw_sq_miss_tbl =
10980 : 0 : flow_hw_create_ctrl_sq_miss_table
10981 : : (dev, hw_ctrl_fdb->regc_sq_items_tmpl,
10982 : : hw_ctrl_fdb->port_actions_tmpl, error);
10983 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
10984 : 0 : DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)"
10985 : : " for control flows", dev->data->port_id);
10986 : 0 : goto err;
10987 : : }
10988 : : /* Create templates and table for default FDB jump flow rules. */
10989 : 0 : hw_ctrl_fdb->port_items_tmpl =
10990 : 0 : flow_hw_create_ctrl_port_pattern_template(dev, error);
10991 [ # # ]: 0 : if (!hw_ctrl_fdb->port_items_tmpl) {
10992 : 0 : DRV_LOG(ERR, "port %u failed to create SQ item template for"
10993 : : " control flows", dev->data->port_id);
10994 : 0 : goto err;
10995 : : }
10996 : 0 : hw_ctrl_fdb->jump_one_actions_tmpl =
10997 : 0 : flow_hw_create_ctrl_jump_actions_template
10998 : : (dev, MLX5_HW_LOWEST_USABLE_GROUP, error);
10999 [ # # ]: 0 : if (!hw_ctrl_fdb->jump_one_actions_tmpl) {
11000 : 0 : DRV_LOG(ERR, "port %u failed to create jump action template"
11001 : : " for control flows", dev->data->port_id);
11002 : 0 : goto err;
11003 : : }
11004 : 0 : hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table
11005 : : (dev, hw_ctrl_fdb->port_items_tmpl,
11006 : : hw_ctrl_fdb->jump_one_actions_tmpl, error);
11007 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_esw_zero_tbl) {
11008 : 0 : DRV_LOG(ERR, "port %u failed to create table for default jump to group 1"
11009 : : " for control flows", dev->data->port_id);
11010 : 0 : goto err;
11011 : : }
11012 : : }
11013 : : /* Create LACP default miss table. */
11014 [ # # # # : 0 : if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) {
# # ]
11015 : 0 : hw_ctrl_fdb->lacp_rx_items_tmpl =
11016 : 0 : flow_hw_create_lacp_rx_pattern_template(dev, error);
11017 [ # # ]: 0 : if (!hw_ctrl_fdb->lacp_rx_items_tmpl) {
11018 : 0 : DRV_LOG(ERR, "port %u failed to create pattern template"
11019 : : " for LACP Rx traffic", dev->data->port_id);
11020 : 0 : goto err;
11021 : : }
11022 : 0 : hw_ctrl_fdb->lacp_rx_actions_tmpl =
11023 : 0 : flow_hw_create_lacp_rx_actions_template(dev, error);
11024 [ # # ]: 0 : if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) {
11025 : 0 : DRV_LOG(ERR, "port %u failed to create actions template"
11026 : : " for LACP Rx traffic", dev->data->port_id);
11027 : 0 : goto err;
11028 : : }
11029 : 0 : hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table
11030 : : (dev, hw_ctrl_fdb->lacp_rx_items_tmpl,
11031 : : hw_ctrl_fdb->lacp_rx_actions_tmpl, error);
11032 [ # # ]: 0 : if (!hw_ctrl_fdb->hw_lacp_rx_tbl) {
11033 : 0 : DRV_LOG(ERR, "port %u failed to create template table for"
11034 : : " for LACP Rx traffic", dev->data->port_id);
11035 : 0 : goto err;
11036 : : }
11037 : : }
11038 : : return 0;
11039 : :
11040 : 0 : err:
11041 : 0 : flow_hw_cleanup_ctrl_fdb_tables(dev);
11042 : 0 : return -EINVAL;
11043 : : }
11044 : :
11045 : : static void
11046 : 0 : flow_hw_cleanup_ctrl_nic_tables(struct rte_eth_dev *dev)
11047 : : {
11048 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11049 : 0 : struct mlx5_flow_hw_ctrl_nic *ctrl = priv->hw_ctrl_nic;
11050 : :
11051 [ # # ]: 0 : if (ctrl == NULL)
11052 : : return;
11053 [ # # ]: 0 : if (ctrl->hw_tx_meta_cpy_tbl)
11054 : 0 : claim_zero(flow_hw_table_destroy(dev, ctrl->hw_tx_meta_cpy_tbl, NULL));
11055 [ # # ]: 0 : if (ctrl->tx_meta_items_tmpl != NULL)
11056 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, ctrl->tx_meta_items_tmpl, NULL));
11057 [ # # ]: 0 : if (ctrl->tx_meta_actions_tmpl != NULL)
11058 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, ctrl->tx_meta_actions_tmpl, NULL));
11059 : 0 : mlx5_free(ctrl);
11060 : 0 : priv->hw_ctrl_nic = NULL;
11061 : : }
11062 : :
11063 : : static int
11064 : 0 : flow_hw_create_nic_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error)
11065 : : {
11066 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11067 : :
11068 : 0 : struct mlx5_flow_hw_ctrl_nic *ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ctrl),
11069 : : 0, SOCKET_ID_ANY);
11070 [ # # ]: 0 : if (!ctrl)
11071 : 0 : return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11072 : : "failed to allocate port control flow table");
11073 : 0 : priv->hw_ctrl_nic = ctrl;
11074 : 0 : ctrl->tx_meta_items_tmpl = flow_hw_create_tx_repr_sq_pattern_tmpl(dev, error);
11075 [ # # ]: 0 : if (ctrl->tx_meta_items_tmpl == NULL)
11076 : 0 : goto error;
11077 : 0 : ctrl->tx_meta_actions_tmpl =
11078 : 0 : flow_hw_create_tx_default_mreg_copy_actions_template(dev, error);
11079 [ # # ]: 0 : if (ctrl->tx_meta_actions_tmpl == NULL) {
11080 : 0 : rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11081 : : "failed to create default Tx metadata copy actions template");
11082 : 0 : goto error;
11083 : : }
11084 : 0 : ctrl->hw_tx_meta_cpy_tbl =
11085 : 0 : flow_hw_create_tx_default_mreg_copy_table(dev, ctrl->tx_meta_items_tmpl,
11086 : : ctrl->tx_meta_actions_tmpl, error);
11087 [ # # ]: 0 : if (ctrl->hw_tx_meta_cpy_tbl == NULL) {
11088 : 0 : rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11089 : : "failed to create default Tx metadata copy table");
11090 : : }
11091 : : return 0;
11092 : :
11093 : 0 : error:
11094 : 0 : flow_hw_cleanup_ctrl_nic_tables(dev);
11095 : 0 : return -rte_errno;
11096 : : }
11097 : :
11098 : : static void
11099 : 0 : flow_hw_ct_mng_destroy(struct rte_eth_dev *dev,
11100 : : struct mlx5_aso_ct_pools_mng *ct_mng)
11101 : : {
11102 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11103 : :
11104 : 0 : mlx5_aso_ct_queue_uninit(priv->sh, ct_mng);
11105 : 0 : mlx5_free(ct_mng);
11106 : 0 : }
11107 : :
11108 : : static void
11109 : 0 : flow_hw_ct_pool_destroy(struct rte_eth_dev *dev,
11110 : : struct mlx5_aso_ct_pool *pool)
11111 : : {
11112 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11113 : :
11114 [ # # ]: 0 : if (pool->dr_action)
11115 : 0 : mlx5dr_action_destroy(pool->dr_action);
11116 [ # # ]: 0 : if (!priv->shared_host) {
11117 [ # # ]: 0 : if (pool->devx_obj)
11118 : 0 : claim_zero(mlx5_devx_cmd_destroy(pool->devx_obj));
11119 [ # # ]: 0 : if (pool->cts)
11120 : 0 : mlx5_ipool_destroy(pool->cts);
11121 : : }
11122 : 0 : mlx5_free(pool);
11123 : 0 : }
11124 : :
11125 : : static struct mlx5_aso_ct_pool *
11126 : 0 : flow_hw_ct_pool_create(struct rte_eth_dev *dev,
11127 : : uint32_t nb_conn_tracks)
11128 : : {
11129 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
11130 : : struct mlx5_aso_ct_pool *pool;
11131 : : struct mlx5_devx_obj *obj;
11132 : : uint32_t nb_cts = rte_align32pow2(nb_conn_tracks);
11133 : : uint32_t log_obj_size = rte_log2_u32(nb_cts);
11134 : 0 : struct mlx5_indexed_pool_config cfg = {
11135 : : .size = sizeof(struct mlx5_aso_ct_action),
11136 : : .trunk_size = 1 << 12,
11137 : : .per_core_cache = 1 << 13,
11138 : : .need_lock = 1,
11139 : 0 : .release_mem_en = !!priv->sh->config.reclaim_mode,
11140 : : .malloc = mlx5_malloc,
11141 : : .free = mlx5_free,
11142 : : .type = "mlx5_hw_ct_action",
11143 : : };
11144 : : int reg_id;
11145 : : uint32_t flags = 0;
11146 : :
11147 : 0 : pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
11148 [ # # ]: 0 : if (!pool) {
11149 : 0 : rte_errno = ENOMEM;
11150 : 0 : return NULL;
11151 : : }
11152 [ # # ]: 0 : if (!priv->shared_host) {
11153 : : /*
11154 : : * No need for local cache if CT number is a small number. Since
11155 : : * flow insertion rate will be very limited in that case. Here let's
11156 : : * set the number to less than default trunk size 4K.
11157 : : */
11158 [ # # ]: 0 : if (nb_cts <= cfg.trunk_size) {
11159 : 0 : cfg.per_core_cache = 0;
11160 : 0 : cfg.trunk_size = nb_cts;
11161 [ # # ]: 0 : } else if (nb_cts <= MLX5_HW_IPOOL_SIZE_THRESHOLD) {
11162 : 0 : cfg.per_core_cache = MLX5_HW_IPOOL_CACHE_MIN;
11163 : : }
11164 : 0 : cfg.max_idx = nb_cts;
11165 : 0 : pool->cts = mlx5_ipool_create(&cfg);
11166 [ # # ]: 0 : if (!pool->cts)
11167 : 0 : goto err;
11168 : 0 : obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->cdev->ctx,
11169 : 0 : priv->sh->cdev->pdn,
11170 : : log_obj_size);
11171 [ # # ]: 0 : if (!obj) {
11172 : 0 : rte_errno = ENODATA;
11173 : 0 : DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
11174 : 0 : goto err;
11175 : : }
11176 : 0 : pool->devx_obj = obj;
11177 : : } else {
11178 : : struct rte_eth_dev *host_dev = priv->shared_host;
11179 : 0 : struct mlx5_priv *host_priv = host_dev->data->dev_private;
11180 : :
11181 : 0 : pool->devx_obj = host_priv->hws_ctpool->devx_obj;
11182 : 0 : pool->cts = host_priv->hws_ctpool->cts;
11183 : : MLX5_ASSERT(pool->cts);
11184 : : MLX5_ASSERT(!nb_conn_tracks);
11185 : : }
11186 : 0 : reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, NULL);
11187 : : flags |= MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_HWS_TX;
11188 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en && priv->master) {
11189 : 0 : flags |= ((is_unified_fdb(priv)) ?
11190 : : (MLX5DR_ACTION_FLAG_HWS_FDB_RX |
11191 : : MLX5DR_ACTION_FLAG_HWS_FDB_TX |
11192 [ # # ]: 0 : MLX5DR_ACTION_FLAG_HWS_FDB_UNIFIED) :
11193 : : MLX5DR_ACTION_FLAG_HWS_FDB);
11194 : : }
11195 : 0 : pool->dr_action = mlx5dr_action_create_aso_ct(priv->dr_ctx,
11196 : 0 : (struct mlx5dr_devx_obj *)pool->devx_obj,
11197 : 0 : reg_id - REG_C_0, flags);
11198 [ # # ]: 0 : if (!pool->dr_action)
11199 : 0 : goto err;
11200 : 0 : pool->sq = priv->ct_mng->aso_sqs;
11201 : : /* Assign the last extra ASO SQ as public SQ. */
11202 : 0 : pool->shared_sq = &priv->ct_mng->aso_sqs[priv->nb_queue - 1];
11203 : 0 : return pool;
11204 : 0 : err:
11205 : 0 : flow_hw_ct_pool_destroy(dev, pool);
11206 : 0 : return NULL;
11207 : : }
11208 : :
11209 : : static int
11210 : 0 : mlx5_flow_ct_init(struct rte_eth_dev *dev,
11211 : : uint32_t nb_conn_tracks,
11212 : : uint16_t nb_queue)
11213 : : {
11214 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11215 : : uint32_t mem_size;
11216 : : int ret = -ENOMEM;
11217 : :
11218 [ # # ]: 0 : if (!priv->shared_host) {
11219 : 0 : mem_size = sizeof(struct mlx5_aso_sq) * nb_queue +
11220 : : sizeof(*priv->ct_mng);
11221 : 0 : priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
11222 : : RTE_CACHE_LINE_SIZE,
11223 : : SOCKET_ID_ANY);
11224 [ # # ]: 0 : if (!priv->ct_mng)
11225 : 0 : goto err;
11226 : 0 : ret = mlx5_aso_ct_queue_init(priv->sh, priv->ct_mng,
11227 : : nb_queue);
11228 [ # # ]: 0 : if (ret)
11229 : 0 : goto err;
11230 : : }
11231 : 0 : priv->hws_ctpool = flow_hw_ct_pool_create(dev, nb_conn_tracks);
11232 [ # # ]: 0 : if (!priv->hws_ctpool)
11233 : 0 : goto err;
11234 : 0 : priv->sh->ct_aso_en = 1;
11235 : 0 : return 0;
11236 : :
11237 : 0 : err:
11238 [ # # ]: 0 : if (priv->hws_ctpool) {
11239 : 0 : flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
11240 : 0 : priv->hws_ctpool = NULL;
11241 : : }
11242 [ # # ]: 0 : if (priv->ct_mng) {
11243 : 0 : flow_hw_ct_mng_destroy(dev, priv->ct_mng);
11244 : 0 : priv->ct_mng = NULL;
11245 : : }
11246 : : return ret;
11247 : : }
11248 : :
11249 : : void
11250 : 0 : mlx5_flow_hw_cleanup_ctrl_rx_tables(struct rte_eth_dev *dev)
11251 : : {
11252 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11253 : : unsigned int i;
11254 : : unsigned int j;
11255 : :
11256 [ # # ]: 0 : if (!priv->hw_ctrl_rx)
11257 : : return;
11258 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11259 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11260 : 0 : struct rte_flow_template_table *tbl = priv->hw_ctrl_rx->tables[i][j].tbl;
11261 : 0 : struct rte_flow_pattern_template *pt = priv->hw_ctrl_rx->tables[i][j].pt;
11262 : :
11263 [ # # ]: 0 : if (tbl)
11264 : 0 : claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11265 [ # # ]: 0 : if (pt)
11266 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, pt, NULL));
11267 : : }
11268 : : }
11269 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++i) {
11270 : 0 : struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[i];
11271 : :
11272 [ # # ]: 0 : if (at)
11273 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
11274 : : }
11275 : 0 : mlx5_free(priv->hw_ctrl_rx);
11276 : 0 : priv->hw_ctrl_rx = NULL;
11277 : : }
11278 : :
11279 : : static uint64_t
11280 : : flow_hw_ctrl_rx_rss_type_hash_types(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11281 : : {
11282 : : switch (rss_type) {
11283 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP:
11284 : : return 0;
11285 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
11286 : : return RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
11287 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11288 : : return RTE_ETH_RSS_NONFRAG_IPV4_UDP;
11289 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11290 : : return RTE_ETH_RSS_NONFRAG_IPV4_TCP;
11291 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
11292 : : return RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
11293 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11294 : : return RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX;
11295 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11296 : : return RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX;
11297 : : default:
11298 : : /* Should not reach here. */
11299 : : MLX5_ASSERT(false);
11300 : : return 0;
11301 : : }
11302 : : }
11303 : :
11304 : : static struct rte_flow_actions_template *
11305 : 0 : flow_hw_create_ctrl_rx_rss_template(struct rte_eth_dev *dev,
11306 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11307 : : {
11308 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11309 : 0 : struct rte_flow_actions_template_attr attr = {
11310 : : .ingress = 1,
11311 : : };
11312 : : uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
11313 : 0 : struct rte_flow_action_rss rss_conf = {
11314 : : .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
11315 : : .level = 0,
11316 : : .types = 0,
11317 : 0 : .key_len = priv->rss_conf.rss_key_len,
11318 : 0 : .key = priv->rss_conf.rss_key,
11319 : 0 : .queue_num = priv->reta_idx_n,
11320 : : .queue = queue,
11321 : : };
11322 : 0 : struct rte_flow_action actions[] = {
11323 : : {
11324 : : .type = RTE_FLOW_ACTION_TYPE_RSS,
11325 : : .conf = &rss_conf,
11326 : : },
11327 : : {
11328 : : .type = RTE_FLOW_ACTION_TYPE_END,
11329 : : }
11330 : : };
11331 [ # # ]: 0 : struct rte_flow_action masks[] = {
11332 : : {
11333 : : .type = RTE_FLOW_ACTION_TYPE_RSS,
11334 : : .conf = &rss_conf,
11335 : : },
11336 : : {
11337 : : .type = RTE_FLOW_ACTION_TYPE_END,
11338 : : }
11339 : : };
11340 : : struct rte_flow_actions_template *at;
11341 : : struct rte_flow_error error;
11342 : : unsigned int i;
11343 : :
11344 : : MLX5_ASSERT(priv->reta_idx_n > 0 && priv->reta_idx);
11345 : : /* Select proper RSS hash types and based on that configure the actions template. */
11346 : 0 : rss_conf.types = flow_hw_ctrl_rx_rss_type_hash_types(rss_type);
11347 [ # # ]: 0 : if (rss_conf.types) {
11348 [ # # ]: 0 : for (i = 0; i < priv->reta_idx_n; ++i)
11349 : 0 : queue[i] = (*priv->reta_idx)[i];
11350 : : } else {
11351 : 0 : rss_conf.queue_num = 1;
11352 : 0 : queue[0] = (*priv->reta_idx)[0];
11353 : : }
11354 : : at = flow_hw_actions_template_create(dev, &attr, actions, masks, &error);
11355 [ # # ]: 0 : if (!at)
11356 [ # # ]: 0 : DRV_LOG(ERR,
11357 : : "Failed to create ctrl flow actions template: rte_errno(%d), type(%d): %s",
11358 : : rte_errno, error.type,
11359 : : error.message ? error.message : "(no stated reason)");
11360 : 0 : return at;
11361 : : }
11362 : :
/*
 * Per-RSS-expansion-type flow rule priority. More specific match layers get a
 * lower (stronger) priority: L4 (TCP/UDP) < L3 (IPv4/IPv6) < L2 (non-IP).
 * Consumed by flow_hw_get_ctrl_rx_table_attr() when building table attributes.
 */
static uint32_t ctrl_rx_rss_priority_map[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX] = {
	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP] = MLX5_HW_CTRL_RX_PRIO_L2,
	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4] = MLX5_HW_CTRL_RX_PRIO_L3,
	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6] = MLX5_HW_CTRL_RX_PRIO_L3,
	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP] = MLX5_HW_CTRL_RX_PRIO_L4,
	[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP] = MLX5_HW_CTRL_RX_PRIO_L4,
};
11372 : :
/*
 * Number of flow rules a control Rx table must be able to hold for each
 * Ethernet pattern variant (used as nb_flows in the table attributes).
 * VLAN variants scale by the number of VLAN IDs; the DMAC variants scale by
 * the number of unicast MAC addresses (one rule per address, or per
 * address/VLAN pair for DMAC_VLAN).
 */
static uint32_t ctrl_rx_nb_flows_map[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] = {
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL] = 1,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST] = 1,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST] = 1,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN] = MLX5_MAX_VLAN_IDS,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST] = 1,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST] = 1,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN] = MLX5_MAX_VLAN_IDS,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC] = MLX5_MAX_UC_MAC_ADDRESSES,
	[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN] =
		MLX5_MAX_UC_MAC_ADDRESSES * MLX5_MAX_VLAN_IDS,
};
11386 : :
11387 : : static struct rte_flow_template_table_attr
11388 : : flow_hw_get_ctrl_rx_table_attr(enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
11389 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11390 : : {
11391 : 0 : return (struct rte_flow_template_table_attr){
11392 : : .flow_attr = {
11393 : : .group = 0,
11394 : 0 : .priority = ctrl_rx_rss_priority_map[rss_type],
11395 : : .ingress = 1,
11396 : : },
11397 : 0 : .nb_flows = ctrl_rx_nb_flows_map[eth_pattern_type],
11398 : : };
11399 : : }
11400 : :
11401 : : static struct rte_flow_item
11402 : : flow_hw_get_ctrl_rx_eth_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
11403 : : {
11404 : : struct rte_flow_item item = {
11405 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
11406 : : .mask = NULL,
11407 : : };
11408 : :
11409 : 0 : switch (eth_pattern_type) {
11410 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
11411 : : item.mask = &ctrl_rx_eth_promisc_mask;
11412 : : break;
11413 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
11414 : : item.mask = &ctrl_rx_eth_mcast_mask;
11415 : 0 : break;
11416 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
11417 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
11418 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
11419 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
11420 : : item.mask = &ctrl_rx_eth_dmac_mask;
11421 : 0 : break;
11422 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
11423 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
11424 : : item.mask = &ctrl_rx_eth_ipv4_mcast_mask;
11425 : 0 : break;
11426 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
11427 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
11428 : : item.mask = &ctrl_rx_eth_ipv6_mcast_mask;
11429 : 0 : break;
11430 : 0 : default:
11431 : : /* Should not reach here - ETH mask must be present. */
11432 : : item.type = RTE_FLOW_ITEM_TYPE_END;
11433 : : MLX5_ASSERT(false);
11434 : 0 : break;
11435 : : }
11436 : 0 : return item;
11437 : : }
11438 : :
11439 : : static struct rte_flow_item
11440 : : flow_hw_get_ctrl_rx_vlan_item(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
11441 : : {
11442 : : struct rte_flow_item item = {
11443 : : .type = RTE_FLOW_ITEM_TYPE_VOID,
11444 : : .mask = NULL,
11445 : : };
11446 : :
11447 [ # # ]: 0 : switch (eth_pattern_type) {
11448 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
11449 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
11450 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
11451 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
11452 : : item.type = RTE_FLOW_ITEM_TYPE_VLAN;
11453 : : item.mask = &rte_flow_item_vlan_mask;
11454 : 0 : break;
11455 : : default:
11456 : : /* Nothing to update. */
11457 : : break;
11458 : : }
11459 : 0 : return item;
11460 : : }
11461 : :
11462 : : static struct rte_flow_item
11463 : : flow_hw_get_ctrl_rx_l3_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11464 : : {
11465 : : struct rte_flow_item item = {
11466 : : .type = RTE_FLOW_ITEM_TYPE_VOID,
11467 : : .mask = NULL,
11468 : : };
11469 : :
11470 [ # # # ]: 0 : switch (rss_type) {
11471 : 0 : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4:
11472 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11473 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11474 : : item.type = RTE_FLOW_ITEM_TYPE_IPV4;
11475 : 0 : break;
11476 : 0 : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6:
11477 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11478 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11479 : : item.type = RTE_FLOW_ITEM_TYPE_IPV6;
11480 : 0 : break;
11481 : : default:
11482 : : /* Nothing to update. */
11483 : : break;
11484 : : }
11485 : 0 : return item;
11486 : : }
11487 : :
11488 : : static struct rte_flow_item
11489 : : flow_hw_get_ctrl_rx_l4_item(const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11490 : : {
11491 : : struct rte_flow_item item = {
11492 : : .type = RTE_FLOW_ITEM_TYPE_VOID,
11493 : : .mask = NULL,
11494 : : };
11495 : :
11496 [ # # # ]: 0 : switch (rss_type) {
11497 : 0 : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP:
11498 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP:
11499 : : item.type = RTE_FLOW_ITEM_TYPE_UDP;
11500 : 0 : break;
11501 : 0 : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP:
11502 : : case MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP:
11503 : : item.type = RTE_FLOW_ITEM_TYPE_TCP;
11504 : 0 : break;
11505 : : default:
11506 : : /* Nothing to update. */
11507 : : break;
11508 : : }
11509 : 0 : return item;
11510 : : }
11511 : :
11512 : : static struct rte_flow_pattern_template *
11513 : 0 : flow_hw_create_ctrl_rx_pattern_template
11514 : : (struct rte_eth_dev *dev,
11515 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
11516 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
11517 : : {
11518 : 0 : const struct rte_flow_pattern_template_attr attr = {
11519 : : .relaxed_matching = 0,
11520 : : .ingress = 1,
11521 : : };
11522 [ # # # # : 0 : struct rte_flow_item items[] = {
# # ]
11523 : : /* Matching patterns */
11524 : : flow_hw_get_ctrl_rx_eth_item(eth_pattern_type),
11525 : : flow_hw_get_ctrl_rx_vlan_item(eth_pattern_type),
11526 : : flow_hw_get_ctrl_rx_l3_item(rss_type),
11527 : : flow_hw_get_ctrl_rx_l4_item(rss_type),
11528 : : /* Terminate pattern */
11529 : : { .type = RTE_FLOW_ITEM_TYPE_END }
11530 : : };
11531 : :
11532 : 0 : return flow_hw_pattern_template_create(dev, &attr, items, false, NULL);
11533 : : }
11534 : :
11535 : : int
11536 : 0 : mlx5_flow_hw_create_ctrl_rx_tables(struct rte_eth_dev *dev)
11537 : : {
11538 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11539 : : unsigned int i;
11540 : : unsigned int j;
11541 : : int ret;
11542 : :
11543 : : MLX5_ASSERT(!priv->hw_ctrl_rx);
11544 : 0 : priv->hw_ctrl_rx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*priv->hw_ctrl_rx),
11545 : : RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
11546 [ # # ]: 0 : if (!priv->hw_ctrl_rx) {
11547 : 0 : DRV_LOG(ERR, "Failed to allocate memory for Rx control flow tables");
11548 : 0 : rte_errno = ENOMEM;
11549 : 0 : return -rte_errno;
11550 : : }
11551 : : /* Create all pattern template variants. */
11552 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11553 : : enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
11554 : :
11555 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11556 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
11557 : : struct rte_flow_template_table_attr attr;
11558 : : struct rte_flow_pattern_template *pt;
11559 : :
11560 : : attr = flow_hw_get_ctrl_rx_table_attr(eth_pattern_type, rss_type);
11561 : 0 : pt = flow_hw_create_ctrl_rx_pattern_template(dev, eth_pattern_type,
11562 : : rss_type);
11563 [ # # ]: 0 : if (!pt)
11564 : 0 : goto err;
11565 : 0 : priv->hw_ctrl_rx->tables[i][j].attr = attr;
11566 : 0 : priv->hw_ctrl_rx->tables[i][j].pt = pt;
11567 : : }
11568 : : }
11569 : : return 0;
11570 : : err:
11571 : 0 : ret = rte_errno;
11572 : 0 : return -ret;
11573 : : }
11574 : :
11575 : : void
11576 : 0 : mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev)
11577 : : {
11578 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11579 : : struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
11580 : : unsigned int i;
11581 : : unsigned int j;
11582 : :
11583 [ # # ]: 0 : if (!priv->dr_ctx)
11584 : : return;
11585 [ # # ]: 0 : if (!priv->hw_ctrl_rx)
11586 : : return;
11587 : : hw_ctrl_rx = priv->hw_ctrl_rx;
11588 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
11589 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11590 : : struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
11591 : :
11592 [ # # ]: 0 : if (tmpls->tbl) {
11593 : 0 : claim_zero(flow_hw_table_destroy(dev, tmpls->tbl, NULL));
11594 : 0 : tmpls->tbl = NULL;
11595 : : }
11596 : : }
11597 : : }
11598 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
11599 [ # # ]: 0 : if (hw_ctrl_rx->rss[j]) {
11600 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_rx->rss[j], NULL));
11601 : 0 : hw_ctrl_rx->rss[j] = NULL;
11602 : : }
11603 : : }
11604 : : }
11605 : :
11606 : : /**
11607 : : * Copy the provided HWS configuration to a newly allocated buffer.
11608 : : *
11609 : : * @param[in] port_attr
11610 : : * Port configuration attributes.
11611 : : * @param[in] nb_queue
11612 : : * Number of queue.
11613 : : * @param[in] queue_attr
11614 : : * Array that holds attributes for each flow queue.
11615 : : * @param[in] nt_mode
11616 : : * Non template mode.
11617 : : *
11618 : : * @return
11619 : : * Pointer to copied HWS configuration is returned on success.
11620 : : * Otherwise, NULL is returned and rte_errno is set.
11621 : : */
11622 : : static struct mlx5_flow_hw_attr *
11623 : 0 : flow_hw_alloc_copy_config(const struct rte_flow_port_attr *port_attr,
11624 : : const uint16_t nb_queue,
11625 : : const struct rte_flow_queue_attr *queue_attr[],
11626 : : bool nt_mode,
11627 : : struct rte_flow_error *error)
11628 : : {
11629 : : struct mlx5_flow_hw_attr *hw_attr;
11630 : : size_t hw_attr_size;
11631 : : unsigned int i;
11632 : :
11633 : 0 : hw_attr_size = sizeof(*hw_attr) + nb_queue * sizeof(*hw_attr->queue_attr);
11634 : 0 : hw_attr = mlx5_malloc(MLX5_MEM_ZERO, hw_attr_size, 0, SOCKET_ID_ANY);
11635 [ # # ]: 0 : if (!hw_attr) {
11636 : 0 : rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11637 : : "Not enough memory to store configuration");
11638 : 0 : return NULL;
11639 : : }
11640 : 0 : memcpy(&hw_attr->port_attr, port_attr, sizeof(*port_attr));
11641 : 0 : hw_attr->nb_queue = nb_queue;
11642 : : /* Queue attributes are placed after the mlx5_flow_hw_attr. */
11643 : 0 : hw_attr->queue_attr = (struct rte_flow_queue_attr *)(hw_attr + 1);
11644 [ # # ]: 0 : for (i = 0; i < nb_queue; ++i)
11645 : 0 : memcpy(&hw_attr->queue_attr[i], queue_attr[i], sizeof(hw_attr->queue_attr[i]));
11646 : 0 : hw_attr->nt_mode = nt_mode;
11647 : 0 : return hw_attr;
11648 : : }
11649 : :
11650 : : /**
11651 : : * Compares the preserved HWS configuration with the provided one.
11652 : : *
11653 : : * @param[in] hw_attr
11654 : : * Pointer to preserved HWS configuration.
11655 : : * @param[in] new_pa
11656 : : * Port configuration attributes to compare.
11657 : : * @param[in] new_nbq
11658 : : * Number of queues to compare.
11659 : : * @param[in] new_qa
11660 : : * Array that holds attributes for each flow queue.
11661 : : *
11662 : : * @return
11663 : : * True if configurations are the same, false otherwise.
11664 : : */
11665 : : static bool
11666 : 0 : flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
11667 : : const struct rte_flow_port_attr *new_pa,
11668 : : const uint16_t new_nbq,
11669 : : const struct rte_flow_queue_attr *new_qa[])
11670 : : {
11671 : : const struct rte_flow_port_attr *old_pa = &hw_attr->port_attr;
11672 : 0 : const uint16_t old_nbq = hw_attr->nb_queue;
11673 : 0 : const struct rte_flow_queue_attr *old_qa = hw_attr->queue_attr;
11674 : : unsigned int i;
11675 : :
11676 [ # # ]: 0 : if (old_pa->nb_counters != new_pa->nb_counters ||
11677 [ # # ]: 0 : old_pa->nb_aging_objects != new_pa->nb_aging_objects ||
11678 [ # # ]: 0 : old_pa->nb_meters != new_pa->nb_meters ||
11679 [ # # ]: 0 : old_pa->nb_conn_tracks != new_pa->nb_conn_tracks ||
11680 [ # # ]: 0 : old_pa->flags != new_pa->flags)
11681 : : return false;
11682 [ # # ]: 0 : if (old_nbq != new_nbq)
11683 : : return false;
11684 [ # # ]: 0 : for (i = 0; i < old_nbq; ++i)
11685 [ # # ]: 0 : if (old_qa[i].size != new_qa[i]->size)
11686 : : return false;
11687 : : return true;
11688 : : }
11689 : :
11690 : : /*
11691 : : * No need to explicitly release drop action templates on port stop.
11692 : : * Drop action templates release with other action templates during
11693 : : * mlx5_dev_close -> flow_hw_resource_release -> flow_hw_actions_template_destroy
11694 : : */
11695 : : static void
11696 : 0 : flow_hw_action_template_drop_release(struct rte_eth_dev *dev)
11697 : : {
11698 : : int i;
11699 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11700 : :
11701 [ # # ]: 0 : for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
11702 [ # # ]: 0 : if (!priv->action_template_drop[i])
11703 : 0 : continue;
11704 : 0 : flow_hw_actions_template_destroy(dev,
11705 : : priv->action_template_drop[i],
11706 : : NULL);
11707 : 0 : priv->action_template_drop[i] = NULL;
11708 : : }
11709 : 0 : }
11710 : :
11711 : : static int
11712 : 0 : flow_hw_action_template_drop_init(struct rte_eth_dev *dev,
11713 : : struct rte_flow_error *error)
11714 : : {
11715 : : uint32_t i, from, to;
11716 : 0 : const struct rte_flow_action drop[2] = {
11717 : : [0] = { .type = RTE_FLOW_ACTION_TYPE_DROP },
11718 : : [1] = { .type = RTE_FLOW_ACTION_TYPE_END },
11719 : : };
11720 : : const struct rte_flow_action *actions = drop;
11721 : : const struct rte_flow_action *masks = drop;
11722 : 0 : const struct rte_flow_actions_template_attr attr[MLX5DR_TABLE_TYPE_MAX] = {
11723 : : [MLX5DR_TABLE_TYPE_NIC_RX] = { .ingress = 1 },
11724 : : [MLX5DR_TABLE_TYPE_NIC_TX] = { .egress = 1 },
11725 : : [MLX5DR_TABLE_TYPE_FDB] = { .transfer = 1 },
11726 : : [MLX5DR_TABLE_TYPE_FDB_RX] = { .transfer = 1 },
11727 : : [MLX5DR_TABLE_TYPE_FDB_TX] = { .transfer = 1 },
11728 : : [MLX5DR_TABLE_TYPE_FDB_UNIFIED] = { .transfer = 1 },
11729 : : };
11730 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11731 : :
11732 : : from = MLX5DR_TABLE_TYPE_NIC_RX;
11733 : : to = MLX5DR_TABLE_TYPE_NIC_TX;
11734 [ # # ]: 0 : for (i = from; i <= to; i++) {
11735 : 0 : priv->action_template_drop[i] =
11736 : 0 : flow_hw_actions_template_create(dev, &attr[i], actions, masks, error);
11737 [ # # ]: 0 : if (!priv->action_template_drop[i])
11738 : : return -1;
11739 : : }
11740 : :
11741 [ # # # # ]: 0 : if (!(priv->sh->config.dv_esw_en && priv->master))
11742 : : return 0;
11743 : :
11744 : : from = MLX5DR_TABLE_TYPE_FDB;
11745 [ # # ]: 0 : to = is_unified_fdb(priv) ? MLX5DR_TABLE_TYPE_FDB_UNIFIED : MLX5DR_TABLE_TYPE_FDB;
11746 [ # # ]: 0 : for (i = from; i <= to; i++) {
11747 : 0 : priv->action_template_drop[i] =
11748 : 0 : flow_hw_actions_template_create(dev, &attr[i], actions, masks, error);
11749 [ # # ]: 0 : if (!priv->action_template_drop[i])
11750 : : return -1;
11751 : : }
11752 : : return 0;
11753 : : }
11754 : :
11755 : : static void
11756 : 0 : __mlx5_flow_hw_resource_release(struct rte_eth_dev *dev, bool ctx_close)
11757 : : {
11758 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11759 : : struct rte_flow_template_table *tbl, *temp_tbl;
11760 : : struct rte_flow_pattern_template *it, *temp_it;
11761 : : struct rte_flow_actions_template *at, *temp_at;
11762 : : struct mlx5_flow_group *grp, *temp_grp;
11763 : : uint32_t i;
11764 : :
11765 : 0 : mlx5_flow_hw_rxq_flag_set(dev, false);
11766 : 0 : flow_hw_flush_all_ctrl_flows(dev);
11767 : 0 : flow_hw_cleanup_ctrl_fdb_tables(dev);
11768 : 0 : flow_hw_cleanup_ctrl_nic_tables(dev);
11769 : 0 : flow_hw_cleanup_tx_repr_tagging(dev);
11770 : 0 : flow_hw_action_template_drop_release(dev);
11771 : 0 : grp = LIST_FIRST(&priv->flow_hw_grp);
11772 [ # # ]: 0 : while (grp) {
11773 : 0 : temp_grp = LIST_NEXT(grp, next);
11774 : 0 : claim_zero(flow_hw_group_unset_miss_group(dev, grp, NULL));
11775 : : grp = temp_grp;
11776 : : }
11777 : 0 : tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
11778 [ # # ]: 0 : while (tbl) {
11779 : 0 : temp_tbl = LIST_NEXT(tbl, next);
11780 : 0 : claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11781 : : tbl = temp_tbl;
11782 : : }
11783 : 0 : tbl = LIST_FIRST(&priv->flow_hw_tbl);
11784 [ # # ]: 0 : while (tbl) {
11785 : 0 : temp_tbl = LIST_NEXT(tbl, next);
11786 : 0 : claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
11787 : : tbl = temp_tbl;
11788 : : }
11789 : 0 : it = LIST_FIRST(&priv->flow_hw_itt);
11790 [ # # ]: 0 : while (it) {
11791 : 0 : temp_it = LIST_NEXT(it, next);
11792 : 0 : claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL));
11793 : : it = temp_it;
11794 : : }
11795 : 0 : at = LIST_FIRST(&priv->flow_hw_at);
11796 [ # # ]: 0 : while (at) {
11797 : 0 : temp_at = LIST_NEXT(at, next);
11798 : 0 : claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
11799 : : at = temp_at;
11800 : : }
11801 : 0 : flow_hw_free_vport_actions(priv);
11802 [ # # ]: 0 : if (priv->acts_ipool) {
11803 : 0 : mlx5_ipool_destroy(priv->acts_ipool);
11804 : 0 : priv->acts_ipool = NULL;
11805 : : }
11806 [ # # ]: 0 : if (priv->hws_age_req)
11807 : 0 : mlx5_hws_age_pool_destroy(priv);
11808 [ # # # # ]: 0 : if (!priv->shared_host && priv->hws_cpool) {
11809 : 0 : mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
11810 : 0 : priv->hws_cpool = NULL;
11811 : : }
11812 [ # # ]: 0 : if (priv->hws_ctpool) {
11813 : 0 : flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
11814 : 0 : priv->hws_ctpool = NULL;
11815 : : }
11816 [ # # ]: 0 : if (priv->ct_mng) {
11817 : 0 : flow_hw_ct_mng_destroy(dev, priv->ct_mng);
11818 : 0 : priv->ct_mng = NULL;
11819 : : }
11820 : 0 : mlx5_flow_quota_destroy(dev);
11821 : 0 : mlx5_hws_global_actions_cleanup(priv);
11822 [ # # ]: 0 : if (priv->hw_q) {
11823 [ # # ]: 0 : for (i = 0; i < priv->nb_queue; i++) {
11824 : 0 : struct mlx5_hw_q *hwq = &priv->hw_q[i];
11825 : 0 : rte_ring_free(hwq->indir_iq);
11826 : 0 : rte_ring_free(hwq->indir_cq);
11827 : 0 : rte_ring_free(hwq->flow_transfer_pending);
11828 : 0 : rte_ring_free(hwq->flow_transfer_completed);
11829 : : }
11830 : 0 : mlx5_free(priv->hw_q);
11831 : 0 : priv->hw_q = NULL;
11832 : : }
11833 [ # # ]: 0 : if (ctx_close) {
11834 [ # # ]: 0 : if (priv->dr_ctx) {
11835 : 0 : claim_zero(mlx5dr_context_close(priv->dr_ctx));
11836 : 0 : priv->dr_ctx = NULL;
11837 : : }
11838 : : }
11839 [ # # ]: 0 : if (priv->shared_host) {
11840 : 0 : struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
11841 : 0 : rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
11842 : : rte_memory_order_relaxed);
11843 : 0 : priv->shared_host = NULL;
11844 : : }
11845 [ # # ]: 0 : if (priv->hw_attr) {
11846 : 0 : mlx5_free(priv->hw_attr);
11847 : 0 : priv->hw_attr = NULL;
11848 : : }
11849 : 0 : priv->nb_queue = 0;
11850 : 0 : }
11851 : :
11852 : : static __rte_always_inline struct rte_ring *
11853 : : mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char *str)
11854 : : {
11855 : : char mz_name[RTE_MEMZONE_NAMESIZE];
11856 : :
11857 : : snprintf(mz_name, sizeof(mz_name), "port_%u_%s_%u", port_id, str, queue);
11858 : 0 : return rte_ring_create(mz_name, size, SOCKET_ID_ANY,
11859 : : RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
11860 : : }
11861 : :
11862 : : static int
11863 : 0 : flow_hw_queue_setup_rings(struct rte_eth_dev *dev,
11864 : : uint16_t queue,
11865 : : uint32_t queue_size,
11866 : : bool nt_mode)
11867 : : {
11868 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11869 : :
11870 : : /* HWS queue info container must be already allocated. */
11871 : : MLX5_ASSERT(priv->hw_q != NULL);
11872 : :
11873 : : /* Notice ring name length is limited. */
11874 : 0 : priv->hw_q[queue].indir_cq = mlx5_hwq_ring_create
11875 : 0 : (dev->data->port_id, queue, queue_size, "indir_act_cq");
11876 [ # # ]: 0 : if (!priv->hw_q[queue].indir_cq) {
11877 : 0 : DRV_LOG(ERR, "port %u failed to allocate indir_act_cq ring for HWS",
11878 : : dev->data->port_id);
11879 : 0 : return -ENOMEM;
11880 : : }
11881 : :
11882 : 0 : priv->hw_q[queue].indir_iq = mlx5_hwq_ring_create
11883 : 0 : (dev->data->port_id, queue, queue_size, "indir_act_iq");
11884 [ # # ]: 0 : if (!priv->hw_q[queue].indir_iq) {
11885 : 0 : DRV_LOG(ERR, "port %u failed to allocate indir_act_iq ring for HWS",
11886 : : dev->data->port_id);
11887 : 0 : return -ENOMEM;
11888 : : }
11889 : :
11890 : : /*
11891 : : * Sync flow API does not require rings used for table resize handling,
11892 : : * because these rings are only used through async flow APIs.
11893 : : */
11894 [ # # ]: 0 : if (nt_mode)
11895 : : return 0;
11896 : :
11897 : 0 : priv->hw_q[queue].flow_transfer_pending = mlx5_hwq_ring_create
11898 : 0 : (dev->data->port_id, queue, queue_size, "tx_pending");
11899 [ # # ]: 0 : if (!priv->hw_q[queue].flow_transfer_pending) {
11900 : 0 : DRV_LOG(ERR, "port %u failed to allocate tx_pending ring for HWS",
11901 : : dev->data->port_id);
11902 : 0 : return -ENOMEM;
11903 : : }
11904 : :
11905 : 0 : priv->hw_q[queue].flow_transfer_completed = mlx5_hwq_ring_create
11906 : 0 : (dev->data->port_id, queue, queue_size, "tx_done");
11907 [ # # ]: 0 : if (!priv->hw_q[queue].flow_transfer_completed) {
11908 : 0 : DRV_LOG(ERR, "port %u failed to allocate tx_done ring for HWS",
11909 : : dev->data->port_id);
11910 : 0 : return -ENOMEM;
11911 : : }
11912 : :
11913 : : return 0;
11914 : : }
11915 : :
11916 : : static int
11917 : 0 : flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr,
11918 : : uint16_t nb_queue,
11919 : : const struct rte_flow_queue_attr *queue_attr[],
11920 : : bool nt_mode, struct rte_flow_error *error)
11921 : : {
11922 : : uint32_t size;
11923 : : unsigned int i;
11924 : :
11925 [ # # ]: 0 : if (port_attr == NULL)
11926 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11927 : : "Port attributes must be non-NULL");
11928 : :
11929 [ # # ]: 0 : if (nb_queue == 0 && !nt_mode)
11930 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11931 : : "At least one flow queue is required");
11932 : :
11933 [ # # ]: 0 : if (queue_attr == NULL)
11934 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11935 : : "Queue attributes must be non-NULL");
11936 : :
11937 : 0 : size = queue_attr[0]->size;
11938 [ # # ]: 0 : for (i = 1; i < nb_queue; ++i) {
11939 [ # # ]: 0 : if (queue_attr[i]->size != size)
11940 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11941 : : NULL,
11942 : : "All flow queues must have the same size");
11943 : : }
11944 : :
11945 : : return 0;
11946 : : }
11947 : :
11948 : : /**
11949 : : * Configure port HWS resources.
11950 : : *
11951 : : * @param[in] dev
11952 : : * Pointer to the rte_eth_dev structure.
11953 : : * @param[in] port_attr
11954 : : * Port configuration attributes.
11955 : : * @param[in] nb_queue
11956 : : * Number of queue.
11957 : : * @param[in] queue_attr
11958 : : * Array that holds attributes for each flow queue.
11959 : : * @param[in] nt_mode
11960 : : * Non-template mode.
11961 : : * @param[out] error
11962 : : * Pointer to error structure.
11963 : : *
11964 : : * @return
11965 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
11966 : : */
11967 : : static int
11968 : 0 : __flow_hw_configure(struct rte_eth_dev *dev,
11969 : : const struct rte_flow_port_attr *port_attr,
11970 : : uint16_t nb_queue,
11971 : : const struct rte_flow_queue_attr *queue_attr[],
11972 : : bool nt_mode,
11973 : : struct rte_flow_error *error)
11974 : : {
11975 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
11976 : : struct mlx5_priv *host_priv = NULL;
11977 : 0 : struct mlx5dr_context_attr dr_ctx_attr = {0};
11978 : : struct mlx5_hw_q *hw_q;
11979 : : struct mlx5_hw_q_job *job = NULL;
11980 : : uint32_t mem_size, i, j;
11981 : 0 : struct mlx5_indexed_pool_config cfg = {
11982 : : .size = sizeof(struct mlx5_action_construct_data),
11983 : : .trunk_size = 4096,
11984 : : .need_lock = 1,
11985 : 0 : .release_mem_en = !!priv->sh->config.reclaim_mode,
11986 : : .malloc = mlx5_malloc,
11987 : : .free = mlx5_free,
11988 : : .type = "mlx5_hw_action_construct_data",
11989 : : };
11990 : : /*
11991 : : * Adds one queue to be used by PMD.
11992 : : * The last queue will be used by the PMD.
11993 : : */
11994 : : uint16_t nb_q_updated = 0;
11995 : : struct rte_flow_queue_attr **_queue_attr = NULL;
11996 : 0 : struct rte_flow_queue_attr ctrl_queue_attr = {0};
11997 [ # # # # ]: 0 : bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master);
11998 : : int ret = 0;
11999 : : bool strict_queue = false;
12000 : :
12001 : 0 : error->type = RTE_FLOW_ERROR_TYPE_NONE;
12002 [ # # ]: 0 : if (mlx5dr_rule_get_handle_size() != MLX5_DR_RULE_SIZE) {
12003 : 0 : rte_errno = EINVAL;
12004 : 0 : goto err;
12005 : : }
12006 [ # # ]: 0 : if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, nt_mode, error))
12007 : 0 : return -rte_errno;
12008 : : /*
12009 : : * Calling rte_flow_configure() again is allowed if
12010 : : * provided configuration matches the initially provided one,
12011 : : * or previous configuration was default non template one.
12012 : : */
12013 [ # # ]: 0 : if (priv->dr_ctx) {
12014 : : MLX5_ASSERT(priv->hw_attr != NULL);
12015 [ # # ]: 0 : for (i = 0; i < priv->nb_queue; i++) {
12016 : 0 : hw_q = &priv->hw_q[i];
12017 : : /* Make sure all queues are empty. */
12018 [ # # ]: 0 : if (hw_q->size != hw_q->job_idx) {
12019 : 0 : rte_errno = EBUSY;
12020 : 0 : goto err;
12021 : : }
12022 : : }
12023 : : /* If previous configuration was not default non template mode config. */
12024 [ # # ]: 0 : if (!priv->hw_attr->nt_mode) {
12025 [ # # ]: 0 : if (flow_hw_compare_config(priv->hw_attr, port_attr, nb_queue, queue_attr))
12026 : : return 0;
12027 : : else
12028 : 0 : return rte_flow_error_set(error, ENOTSUP,
12029 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12030 : : "Changing HWS configuration attributes "
12031 : : "is not supported");
12032 : : }
12033 : : /* Reconfiguration, need to release all resources from previous allocation. */
12034 : 0 : __mlx5_flow_hw_resource_release(dev, true);
12035 : : }
12036 : 0 : priv->hw_attr = flow_hw_alloc_copy_config(port_attr, nb_queue, queue_attr, nt_mode, error);
12037 [ # # ]: 0 : if (!priv->hw_attr) {
12038 : 0 : ret = -rte_errno;
12039 : 0 : goto err;
12040 : : }
12041 : 0 : ctrl_queue_attr.size = queue_attr[0]->size;
12042 : 0 : nb_q_updated = nb_queue + 1;
12043 : 0 : _queue_attr = mlx5_malloc(MLX5_MEM_ZERO,
12044 : : nb_q_updated *
12045 : : sizeof(struct rte_flow_queue_attr *),
12046 : : 64, SOCKET_ID_ANY);
12047 [ # # ]: 0 : if (!_queue_attr) {
12048 : 0 : rte_errno = ENOMEM;
12049 : 0 : goto err;
12050 : : }
12051 : :
12052 : 0 : memcpy(_queue_attr, queue_attr, sizeof(void *) * nb_queue);
12053 : 0 : _queue_attr[nb_queue] = &ctrl_queue_attr;
12054 : 0 : priv->acts_ipool = mlx5_ipool_create(&cfg);
12055 [ # # ]: 0 : if (!priv->acts_ipool)
12056 : 0 : goto err;
12057 : : /* Allocate the queue job descriptor LIFO. */
12058 : 0 : mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
12059 [ # # ]: 0 : for (i = 0; i < nb_q_updated; i++) {
12060 : 0 : mem_size += (sizeof(struct mlx5_hw_q_job *) +
12061 : 0 : sizeof(struct mlx5_hw_q_job)) * _queue_attr[i]->size;
12062 : : }
12063 : 0 : priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
12064 : : 64, SOCKET_ID_ANY);
12065 [ # # ]: 0 : if (!priv->hw_q) {
12066 : 0 : rte_errno = ENOMEM;
12067 : 0 : goto err;
12068 : : }
12069 [ # # ]: 0 : for (i = 0; i < nb_q_updated; i++) {
12070 : 0 : priv->hw_q[i].job_idx = _queue_attr[i]->size;
12071 : 0 : priv->hw_q[i].size = _queue_attr[i]->size;
12072 : 0 : priv->hw_q[i].ongoing_flow_ops = 0;
12073 [ # # ]: 0 : if (i == 0)
12074 : 0 : priv->hw_q[i].job = (struct mlx5_hw_q_job **)
12075 : 0 : &priv->hw_q[nb_q_updated];
12076 : : else
12077 : 0 : priv->hw_q[i].job = (struct mlx5_hw_q_job **)&job[_queue_attr[i - 1]->size];
12078 : 0 : job = (struct mlx5_hw_q_job *)
12079 : 0 : &priv->hw_q[i].job[_queue_attr[i]->size];
12080 [ # # ]: 0 : for (j = 0; j < _queue_attr[i]->size; j++)
12081 : 0 : priv->hw_q[i].job[j] = &job[j];
12082 : :
12083 [ # # ]: 0 : if (flow_hw_queue_setup_rings(dev, i, _queue_attr[i]->size, nt_mode) < 0)
12084 : 0 : goto err;
12085 : : }
12086 : 0 : dr_ctx_attr.pd = priv->sh->cdev->pd;
12087 : 0 : dr_ctx_attr.queues = nb_q_updated;
12088 : : /* Assign initial value of STC numbers for representors. */
12089 [ # # ]: 0 : if (priv->representor)
12090 : 0 : dr_ctx_attr.initial_log_stc_memory = MLX5_REPR_STC_MEMORY_LOG;
12091 : : /* Queue size should all be the same. Take the first one. */
12092 : 0 : dr_ctx_attr.queue_size = _queue_attr[0]->size;
12093 [ # # ]: 0 : if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
12094 : : struct rte_eth_dev *host_dev = NULL;
12095 : : uint16_t port_id;
12096 : :
12097 : : MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
12098 [ # # ]: 0 : if (is_proxy) {
12099 : 0 : DRV_LOG(ERR, "cross vHCA shared mode not supported "
12100 : : "for E-Switch confgiurations");
12101 : 0 : rte_errno = ENOTSUP;
12102 : 0 : goto err;
12103 : : }
12104 [ # # ]: 0 : MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
12105 [ # # ]: 0 : if (port_id == port_attr->host_port_id) {
12106 : 0 : host_dev = &rte_eth_devices[port_id];
12107 : 0 : break;
12108 : : }
12109 : : }
12110 [ # # ]: 0 : if (!host_dev || host_dev == dev ||
12111 [ # # # # ]: 0 : !host_dev->data || !host_dev->data->dev_private) {
12112 : 0 : DRV_LOG(ERR, "Invalid cross vHCA host port %u",
12113 : : port_attr->host_port_id);
12114 : 0 : rte_errno = EINVAL;
12115 : 0 : goto err;
12116 : : }
12117 : : host_priv = host_dev->data->dev_private;
12118 [ # # ]: 0 : if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
12119 : 0 : DRV_LOG(ERR, "Sibling ports %u and %u do not "
12120 : : "require cross vHCA sharing mode",
12121 : : dev->data->port_id, port_attr->host_port_id);
12122 : 0 : rte_errno = EINVAL;
12123 : 0 : goto err;
12124 : : }
12125 [ # # ]: 0 : if (host_priv->shared_host) {
12126 : 0 : DRV_LOG(ERR, "Host port %u is not the sharing base",
12127 : : port_attr->host_port_id);
12128 : 0 : rte_errno = EINVAL;
12129 : 0 : goto err;
12130 : : }
12131 [ # # ]: 0 : if (port_attr->nb_counters ||
12132 [ # # ]: 0 : port_attr->nb_aging_objects ||
12133 [ # # ]: 0 : port_attr->nb_meters ||
12134 [ # # ]: 0 : port_attr->nb_conn_tracks) {
12135 : 0 : DRV_LOG(ERR,
12136 : : "Object numbers on guest port must be zeros");
12137 : 0 : rte_errno = EINVAL;
12138 : 0 : goto err;
12139 : : }
12140 : 0 : dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
12141 : 0 : priv->shared_host = host_dev;
12142 : 0 : rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
12143 : : rte_memory_order_relaxed);
12144 : : }
12145 : : /* Set backward compatibale mode to support non template RTE FLOW API.*/
12146 : 0 : dr_ctx_attr.bwc = true;
12147 : 0 : priv->dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
12148 : : /* rte_errno has been updated by HWS layer. */
12149 [ # # ]: 0 : if (!priv->dr_ctx)
12150 : 0 : goto err;
12151 : 0 : priv->nb_queue = nb_q_updated;
12152 : 0 : ret = flow_hw_action_template_drop_init(dev, error);
12153 [ # # ]: 0 : if (ret)
12154 : 0 : goto err;
12155 : : /* Initialize quotas */
12156 [ # # # # : 0 : if (port_attr->nb_quotas || (host_priv && host_priv->quota_ctx.devx_obj)) {
# # ]
12157 : 0 : ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas);
12158 [ # # ]: 0 : if (ret) {
12159 : 0 : rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12160 : : "Failed to initialize quota.");
12161 : 0 : goto err;
12162 : : }
12163 : : }
12164 : : /* Initialize meter library*/
12165 [ # # # # : 0 : if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
# # ]
12166 [ # # ]: 0 : if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 0, 0, nb_q_updated))
12167 : 0 : goto err;
12168 [ # # ]: 0 : if (priv->sh->config.dv_esw_en) {
12169 : 0 : ret = flow_hw_setup_tx_repr_tagging(dev, error);
12170 [ # # ]: 0 : if (ret)
12171 : 0 : goto err;
12172 : : }
12173 [ # # ]: 0 : if (is_proxy) {
12174 : 0 : ret = flow_hw_create_vport_actions(priv);
12175 [ # # ]: 0 : if (ret) {
12176 : 0 : rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12177 : : NULL, "Failed to create vport actions.");
12178 : 0 : goto err;
12179 : : }
12180 : 0 : ret = flow_hw_create_fdb_ctrl_tables(dev, error);
12181 [ # # ]: 0 : if (ret) {
12182 : 0 : rte_errno = -ret;
12183 : 0 : goto err;
12184 : : }
12185 : : }
12186 [ # # ]: 0 : if (mlx5_vport_tx_metadata_passing_enabled(priv->sh)) {
12187 : 0 : ret = flow_hw_create_nic_ctrl_tables(dev, error);
12188 [ # # ]: 0 : if (ret != 0) {
12189 : 0 : rte_errno = -ret;
12190 : 0 : goto err;
12191 : : }
12192 : : }
12193 [ # # # # : 0 : if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
# # ]
12194 [ # # ]: 0 : if (mlx5_flow_ct_init(dev, port_attr->nb_conn_tracks, nb_q_updated))
12195 : 0 : goto err;
12196 : : }
12197 [ # # # # : 0 : if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
# # ]
12198 [ # # ]: 0 : struct mlx5_hws_cnt_pool *hws_cpool = host_priv ? host_priv->hws_cpool : NULL;
12199 : :
12200 : 0 : ret = mlx5_hws_cnt_pool_create(dev, port_attr->nb_counters,
12201 : : nb_queue, hws_cpool, error);
12202 [ # # ]: 0 : if (ret)
12203 : 0 : goto err;
12204 : : }
12205 [ # # ]: 0 : if (port_attr->nb_aging_objects) {
12206 [ # # ]: 0 : if (port_attr->nb_counters == 0) {
12207 : : /*
12208 : : * Aging management uses counter. Number counters
12209 : : * requesting should take into account a counter for
12210 : : * each flow rules containing AGE without counter.
12211 : : */
12212 : 0 : DRV_LOG(ERR, "Port %u AGE objects are requested (%u) "
12213 : : "without counters requesting.",
12214 : : dev->data->port_id,
12215 : : port_attr->nb_aging_objects);
12216 : 0 : rte_errno = EINVAL;
12217 : 0 : goto err;
12218 : : }
12219 [ # # ]: 0 : if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
12220 : 0 : DRV_LOG(ERR, "Aging is not supported "
12221 : : "in cross vHCA sharing mode");
12222 : : ret = -ENOTSUP;
12223 : 0 : goto err;
12224 : : }
12225 : 0 : strict_queue = !!(port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
12226 : 0 : ret = mlx5_hws_age_pool_init(dev, port_attr->nb_aging_objects,
12227 : : nb_queue, strict_queue);
12228 [ # # ]: 0 : if (ret < 0)
12229 : 0 : goto err;
12230 : : }
12231 : : if (_queue_attr)
12232 : 0 : mlx5_free(_queue_attr);
12233 [ # # ]: 0 : if (port_attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE)
12234 : 0 : priv->hws_strict_queue = 1;
12235 : 0 : dev->flow_fp_ops = &mlx5_flow_hw_fp_ops;
12236 : 0 : return 0;
12237 : 0 : err:
12238 : 0 : __mlx5_flow_hw_resource_release(dev, true);
12239 [ # # ]: 0 : if (_queue_attr)
12240 : 0 : mlx5_free(_queue_attr);
12241 : : /* Do not overwrite the internal errno information. */
12242 [ # # # # ]: 0 : if (ret && error->type != RTE_FLOW_ERROR_TYPE_NONE)
12243 : : return ret;
12244 : 0 : return rte_flow_error_set(error, rte_errno,
12245 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
12246 : : "fail to configure port");
12247 : : }
12248 : :
12249 : : /**
12250 : : * Configure port HWS resources.
12251 : : *
12252 : : * @param[in] dev
12253 : : * Pointer to the rte_eth_dev structure.
12254 : : * @param[in] port_attr
12255 : : * Port configuration attributes.
12256 : : * @param[in] nb_queue
12257 : : * Number of queue.
12258 : : * @param[in] queue_attr
12259 : : * Array that holds attributes for each flow queue.
12260 : : * @param[out] error
12261 : : * Pointer to error structure.
12262 : : *
12263 : : * @return
12264 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
12265 : : */
12266 : : static int
12267 : 0 : flow_hw_configure(struct rte_eth_dev *dev,
12268 : : const struct rte_flow_port_attr *port_attr,
12269 : : uint16_t nb_queue,
12270 : : const struct rte_flow_queue_attr *queue_attr[],
12271 : : struct rte_flow_error *error)
12272 : : {
12273 : 0 : struct rte_flow_error shadow_error = {0, };
12274 : :
12275 [ # # ]: 0 : if (!error)
12276 : : error = &shadow_error;
12277 : 0 : return __flow_hw_configure(dev, port_attr, nb_queue, queue_attr, false, error);
12278 : : }
12279 : :
12280 : : /**
12281 : : * Release HWS resources.
12282 : : *
12283 : : * @param[in] dev
12284 : : * Pointer to the rte_eth_dev structure.
12285 : : */
12286 : : void
12287 : 0 : mlx5_flow_hw_resource_release(struct rte_eth_dev *dev)
12288 : : {
12289 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12290 : :
12291 [ # # ]: 0 : if (!priv->dr_ctx)
12292 : : return;
12293 : 0 : __mlx5_flow_hw_resource_release(dev, false);
12294 : : }
12295 : :
12296 : : /* Sets vport tag and mask, for given port, used in HWS rules. */
12297 : : void
12298 : 0 : mlx5_flow_hw_set_port_info(struct rte_eth_dev *dev)
12299 : : {
12300 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12301 : 0 : uint16_t port_id = dev->data->port_id;
12302 : : struct flow_hw_port_info *info;
12303 : :
12304 : : MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
12305 : 0 : info = &mlx5_flow_hw_port_infos[port_id];
12306 : 0 : info->regc_mask = priv->vport_meta_mask;
12307 [ # # ]: 0 : info->regc_value = priv->vport_meta_tag;
12308 [ # # ]: 0 : info->is_wire = mlx5_is_port_on_mpesw_device(priv) ? priv->mpesw_uplink : priv->master;
12309 : 0 : }
12310 : :
12311 : : /* Clears vport tag and mask used for HWS rules. */
12312 : : void
12313 : 0 : mlx5_flow_hw_clear_port_info(struct rte_eth_dev *dev)
12314 : : {
12315 : 0 : uint16_t port_id = dev->data->port_id;
12316 : : struct flow_hw_port_info *info;
12317 : :
12318 : : MLX5_ASSERT(port_id < RTE_MAX_ETHPORTS);
12319 : 0 : info = &mlx5_flow_hw_port_infos[port_id];
12320 : 0 : info->regc_mask = 0;
12321 : 0 : info->regc_value = 0;
12322 : 0 : info->is_wire = 0;
12323 : 0 : }
12324 : :
12325 : : static int
12326 : 0 : flow_hw_conntrack_destroy(struct rte_eth_dev *dev,
12327 : : uint32_t idx,
12328 : : struct rte_flow_error *error)
12329 : : {
12330 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12331 : 0 : struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12332 : : struct mlx5_aso_ct_action *ct;
12333 : :
12334 [ # # ]: 0 : if (priv->shared_host)
12335 : 0 : return rte_flow_error_set(error, ENOTSUP,
12336 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12337 : : NULL,
12338 : : "CT destruction is not allowed to guest port");
12339 : 0 : ct = mlx5_ipool_get(pool->cts, idx);
12340 [ # # ]: 0 : if (!ct) {
12341 : 0 : return rte_flow_error_set(error, EINVAL,
12342 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12343 : : NULL,
12344 : : "Invalid CT destruction index");
12345 : : }
12346 : 0 : rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
12347 : : rte_memory_order_relaxed);
12348 : 0 : mlx5_ipool_free(pool->cts, idx);
12349 : 0 : return 0;
12350 : : }
12351 : :
12352 : : static int
12353 : 0 : flow_hw_conntrack_query(struct rte_eth_dev *dev, uint32_t queue, uint32_t idx,
12354 : : struct rte_flow_action_conntrack *profile,
12355 : : void *user_data, bool push,
12356 : : struct rte_flow_error *error)
12357 : : {
12358 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12359 : 0 : struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12360 : : struct mlx5_aso_ct_action *ct;
12361 : :
12362 [ # # ]: 0 : if (priv->shared_host)
12363 : 0 : return rte_flow_error_set(error, ENOTSUP,
12364 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12365 : : NULL,
12366 : : "CT query is not allowed to guest port");
12367 : 0 : ct = mlx5_ipool_get(pool->cts, idx);
12368 [ # # ]: 0 : if (!ct) {
12369 : 0 : return rte_flow_error_set(error, EINVAL,
12370 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12371 : : NULL,
12372 : : "Invalid CT query index");
12373 : : }
12374 : 0 : profile->peer_port = ct->peer;
12375 : 0 : profile->is_original_dir = ct->is_original;
12376 [ # # ]: 0 : if (mlx5_aso_ct_query_by_wqe(priv->sh, queue, ct, profile, user_data, push))
12377 : 0 : return rte_flow_error_set(error, EIO,
12378 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12379 : : NULL,
12380 : : "Failed to query CT context");
12381 : : return 0;
12382 : : }
12383 : :
12384 : :
12385 : : static int
12386 : 0 : flow_hw_conntrack_update(struct rte_eth_dev *dev, uint32_t queue,
12387 : : const struct rte_flow_modify_conntrack *action_conf,
12388 : : uint32_t idx, void *user_data, bool push,
12389 : : struct rte_flow_error *error)
12390 : : {
12391 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12392 : 0 : struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12393 : : struct mlx5_aso_ct_action *ct;
12394 : : const struct rte_flow_action_conntrack *new_prf;
12395 : : int ret = 0;
12396 : :
12397 [ # # ]: 0 : if (priv->shared_host)
12398 : 0 : return rte_flow_error_set(error, ENOTSUP,
12399 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12400 : : NULL,
12401 : : "CT update is not allowed to guest port");
12402 : 0 : ct = mlx5_ipool_get(pool->cts, idx);
12403 [ # # ]: 0 : if (!ct) {
12404 : 0 : return rte_flow_error_set(error, EINVAL,
12405 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12406 : : NULL,
12407 : : "Invalid CT update index");
12408 : : }
12409 : 0 : new_prf = &action_conf->new_ct;
12410 [ # # ]: 0 : if (action_conf->direction)
12411 : 0 : ct->is_original = !!new_prf->is_original_dir;
12412 [ # # ]: 0 : if (action_conf->state) {
12413 : : /* Only validate the profile when it needs to be updated. */
12414 : 0 : ret = mlx5_validate_action_ct(dev, new_prf, error);
12415 [ # # ]: 0 : if (ret)
12416 : : return ret;
12417 : 0 : ret = mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, new_prf,
12418 : : user_data, push);
12419 [ # # ]: 0 : if (ret)
12420 : 0 : return rte_flow_error_set(error, EIO,
12421 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12422 : : NULL,
12423 : : "Failed to send CT context update WQE");
12424 [ # # ]: 0 : if (queue != MLX5_HW_INV_QUEUE)
12425 : : return 0;
12426 : : /* Block until ready or a failure in synchronous mode. */
12427 : 0 : ret = mlx5_aso_ct_available(priv->sh, queue, ct);
12428 [ # # ]: 0 : if (ret)
12429 : 0 : rte_flow_error_set(error, rte_errno,
12430 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12431 : : NULL,
12432 : : "Timeout to get the CT update");
12433 : : }
12434 : : return ret;
12435 : : }
12436 : :
12437 : : static struct rte_flow_action_handle *
12438 : 0 : flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue,
12439 : : const struct rte_flow_action_conntrack *pro,
12440 : : void *user_data, bool push,
12441 : : struct rte_flow_error *error)
12442 : : {
12443 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12444 : 0 : struct mlx5_aso_ct_pool *pool = priv->hws_ctpool;
12445 : : struct mlx5_aso_ct_action *ct;
12446 : 0 : uint32_t ct_idx = 0;
12447 : : int ret;
12448 : : bool async = !!(queue != MLX5_HW_INV_QUEUE);
12449 : :
12450 [ # # ]: 0 : if (priv->shared_host) {
12451 : 0 : rte_flow_error_set(error, ENOTSUP,
12452 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12453 : : NULL,
12454 : : "CT create is not allowed to guest port");
12455 : 0 : return NULL;
12456 : : }
12457 [ # # ]: 0 : if (!pool) {
12458 : 0 : rte_flow_error_set(error, EINVAL,
12459 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12460 : : "CT is not enabled");
12461 : 0 : return 0;
12462 : : }
12463 : 0 : ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx);
12464 [ # # ]: 0 : if (!ct) {
12465 : 0 : rte_flow_error_set(error, rte_errno,
12466 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12467 : : "Failed to allocate CT object");
12468 : 0 : return 0;
12469 : : }
12470 : 0 : ct->offset = ct_idx - 1;
12471 : 0 : ct->is_original = !!pro->is_original_dir;
12472 : 0 : ct->peer = pro->peer_port;
12473 : 0 : ct->pool = pool;
12474 [ # # ]: 0 : if (mlx5_aso_ct_update_by_wqe(priv->sh, queue, ct, pro, user_data, push)) {
12475 : 0 : mlx5_ipool_free(pool->cts, ct_idx);
12476 : 0 : rte_flow_error_set(error, EBUSY,
12477 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12478 : : "Failed to update CT");
12479 : 0 : return 0;
12480 : : }
12481 [ # # ]: 0 : if (!async) {
12482 : 0 : ret = mlx5_aso_ct_available(priv->sh, queue, ct);
12483 [ # # ]: 0 : if (ret) {
12484 : 0 : mlx5_ipool_free(pool->cts, ct_idx);
12485 : 0 : rte_flow_error_set(error, rte_errno,
12486 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12487 : : NULL,
12488 : : "Timeout to get the CT update");
12489 : 0 : return 0;
12490 : : }
12491 : : }
12492 : 0 : return MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(ct_idx);
12493 : : }
12494 : :
12495 : : /**
12496 : : * Validate shared action.
12497 : : *
12498 : : * @param[in] dev
12499 : : * Pointer to the rte_eth_dev structure.
12500 : : * @param[in] queue
12501 : : * Which queue to be used.
12502 : : * @param[in] attr
12503 : : * Operation attribute.
12504 : : * @param[in] conf
12505 : : * Indirect action configuration.
12506 : : * @param[in] action
12507 : : * rte_flow action detail.
12508 : : * @param[in] user_data
12509 : : * Pointer to the user_data.
12510 : : * @param[out] error
12511 : : * Pointer to error structure.
12512 : : *
12513 : : * @return
12514 : : * 0 on success, otherwise negative errno value.
12515 : : */
12516 : : static int
12517 : 0 : flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
12518 : : const struct rte_flow_op_attr *attr,
12519 : : const struct rte_flow_indir_action_conf *conf,
12520 : : const struct rte_flow_action *action,
12521 : : void *user_data,
12522 : : struct rte_flow_error *error)
12523 : : {
12524 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12525 : :
12526 : : RTE_SET_USED(attr);
12527 : : RTE_SET_USED(queue);
12528 : : RTE_SET_USED(user_data);
12529 [ # # # # : 0 : switch (action->type) {
# # # ]
12530 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
12531 [ # # ]: 0 : if (!priv->hws_age_req) {
12532 [ # # ]: 0 : if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_AGE,
12533 : : error))
12534 : 0 : return rte_flow_error_set
12535 : : (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12536 : : NULL, "aging pool not initialized");
12537 : : }
12538 : : break;
12539 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
12540 [ # # ]: 0 : if (!priv->hws_cpool) {
12541 [ # # ]: 0 : if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_COUNT,
12542 : : error))
12543 : 0 : return rte_flow_error_set
12544 : : (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12545 : : NULL, "counters pool not initialized");
12546 : : }
12547 : : break;
12548 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12549 [ # # ]: 0 : if (priv->hws_ctpool == NULL) {
12550 [ # # ]: 0 : if (flow_hw_allocate_actions(dev, MLX5_FLOW_ACTION_CT,
12551 : : error))
12552 : 0 : return rte_flow_error_set
12553 : : (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
12554 : : NULL, "CT pool not initialized");
12555 : : }
12556 : 0 : return mlx5_validate_action_ct(dev, action->conf, error);
12557 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
12558 : 0 : return flow_hw_validate_action_meter_mark(dev, action, true, error);
12559 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
12560 : 0 : return mlx5_flow_dv_action_validate(dev, conf, action, error);
12561 : : case RTE_FLOW_ACTION_TYPE_QUOTA:
12562 : : return 0;
12563 : 0 : default:
12564 : 0 : return rte_flow_error_set(error, ENOTSUP,
12565 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12566 : : "action type not supported");
12567 : : }
12568 : : return 0;
12569 : : }
12570 : :
12571 : : static __rte_always_inline bool
12572 : : flow_hw_action_push(const struct rte_flow_op_attr *attr)
12573 : : {
12574 [ # # # # : 0 : return attr ? !attr->postpone : true;
# # # # #
# # # #
# ]
12575 : : }
12576 : :
12577 : : static __rte_always_inline struct mlx5_hw_q_job *
12578 : : flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
12579 : : const struct rte_flow_action_handle *handle,
12580 : : void *user_data, void *query_data,
12581 : : enum mlx5_hw_job_type type,
12582 : : enum mlx5_hw_indirect_type indirect_type,
12583 : : struct rte_flow_error *error)
12584 : : {
12585 : : struct mlx5_hw_q_job *job;
12586 : :
12587 [ # # # # : 0 : if (queue == MLX5_HW_INV_QUEUE)
# # # # #
# # # #
# ]
12588 [ # # # # : 0 : queue = CTRL_QUEUE_ID(priv);
# # # # #
# # # ]
12589 : : job = flow_hw_job_get(priv, queue);
12590 [ # # # # : 0 : if (!job) {
# # # # #
# # # # #
# # # # #
# # # # #
# # # # ]
12591 : 0 : rte_flow_error_set(error, ENOMEM,
12592 : : RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
12593 : : "Action destroy failed due to queue full.");
12594 : 0 : return NULL;
12595 : : }
12596 : 0 : job->type = type;
12597 : 0 : job->action = handle;
12598 : 0 : job->user_data = user_data;
12599 : 0 : job->query.user = query_data;
12600 [ # # # # : 0 : job->indirect_type = indirect_type;
# # # # #
# # # ]
12601 : 0 : return job;
12602 : : }
12603 : :
12604 : : struct mlx5_hw_q_job *
12605 [ # # ]: 0 : mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue,
12606 : : const struct rte_flow_action_handle *handle,
12607 : : void *user_data, void *query_data,
12608 : : enum mlx5_hw_job_type type,
12609 : : struct rte_flow_error *error)
12610 : : {
12611 : 0 : return flow_hw_action_job_init(priv, queue, handle, user_data, query_data,
12612 : : type, MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12613 : : }
12614 : :
12615 : : static __rte_always_inline void
12616 : : flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
12617 : : struct mlx5_hw_q_job *job,
12618 : : bool push, bool aso, bool status)
12619 : : {
12620 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12621 : :
12622 [ # # ]: 0 : if (queue == MLX5_HW_INV_QUEUE)
12623 : 0 : queue = CTRL_QUEUE_ID(priv);
12624 [ # # # # : 0 : if (likely(status)) {
# # # # #
# # # ]
12625 : : /* 1. add new job to a queue */
12626 [ # # # # : 0 : if (!aso)
# # # # #
# ]
12627 [ # # # # : 0 : rte_ring_enqueue(push ?
# # # # #
# # # #
# ]
12628 : 0 : priv->hw_q[queue].indir_cq :
12629 : 0 : priv->hw_q[queue].indir_iq,
12630 : : job);
12631 : : /* 2. send pending jobs */
12632 [ # # # # : 0 : if (push)
# # # # #
# # # #
# ]
12633 : 0 : __flow_hw_push_action(dev, queue);
12634 : : } else {
12635 : : flow_hw_job_put(priv, job, queue);
12636 : : }
12637 : : }
12638 : :
12639 : : /**
12640 : : * Create shared action.
12641 : : *
12642 : : * @param[in] dev
12643 : : * Pointer to the rte_eth_dev structure.
12644 : : * @param[in] queue
12645 : : * Which queue to be used.
12646 : : * @param[in] attr
12647 : : * Operation attribute.
12648 : : * @param[in] conf
12649 : : * Indirect action configuration.
12650 : : * @param[in] action
12651 : : * rte_flow action detail.
12652 : : * @param[in] user_data
12653 : : * Pointer to the user_data.
12654 : : * @param[out] error
12655 : : * Pointer to error structure.
12656 : : *
12657 : : * @return
12658 : : * Action handle on success, NULL otherwise and rte_errno is set.
12659 : : */
12660 : : static struct rte_flow_action_handle *
12661 : 0 : flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
12662 : : const struct rte_flow_op_attr *attr,
12663 : : const struct rte_flow_indir_action_conf *conf,
12664 : : const struct rte_flow_action *action,
12665 : : void *user_data,
12666 : : struct rte_flow_error *error)
12667 : : {
12668 : : struct rte_flow_action_handle *handle = NULL;
12669 : : struct mlx5_hw_q_job *job = NULL;
12670 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
12671 : : const struct rte_flow_action_age *age;
12672 : : struct mlx5_aso_mtr *aso_mtr = NULL;
12673 : : cnt_id_t cnt_id;
12674 : : uint32_t age_idx;
12675 : : bool push = flow_hw_action_push(attr);
12676 : : bool aso = false;
12677 : 0 : bool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK;
12678 : : int ret;
12679 : :
12680 [ # # ]: 0 : if (!mlx5_hw_ctx_validate(dev, error))
12681 : : return NULL;
12682 [ # # ]: 0 : if (attr || force_job) {
12683 : : job = flow_hw_action_job_init(priv, queue, NULL, user_data,
12684 : : NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
12685 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12686 : : if (!job)
12687 : 0 : return NULL;
12688 : : }
12689 [ # # # # : 0 : switch (action->type) {
# # # ]
12690 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
12691 [ # # ]: 0 : if (priv->hws_strict_queue) {
12692 : 0 : struct mlx5_age_info *info = GET_PORT_AGE_INFO(priv);
12693 : :
12694 [ # # ]: 0 : if (queue >= info->hw_q_age->nb_rings) {
12695 : 0 : rte_flow_error_set(error, EINVAL,
12696 : : RTE_FLOW_ERROR_TYPE_ACTION,
12697 : : NULL,
12698 : : "Invalid queue ID for indirect AGE.");
12699 : 0 : rte_errno = EINVAL;
12700 : 0 : return NULL;
12701 : : }
12702 : : }
12703 : 0 : age = action->conf;
12704 : 0 : age_idx = mlx5_hws_age_action_create(priv, queue, true, age,
12705 : : 0, error);
12706 [ # # ]: 0 : if (age_idx == 0) {
12707 : 0 : rte_flow_error_set(error, ENODEV,
12708 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12709 : : "AGE are not configured!");
12710 : : } else {
12711 : 0 : age_idx = (MLX5_INDIRECT_ACTION_TYPE_AGE <<
12712 : : MLX5_INDIRECT_ACTION_TYPE_OFFSET) | age_idx;
12713 : 0 : handle =
12714 : 0 : (struct rte_flow_action_handle *)(uintptr_t)age_idx;
12715 : : }
12716 : : break;
12717 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
12718 [ # # ]: 0 : if (mlx5_hws_cnt_shared_get(priv->hws_cpool, &cnt_id, 0))
12719 : 0 : rte_flow_error_set(error, ENODEV,
12720 : : RTE_FLOW_ERROR_TYPE_ACTION,
12721 : : NULL,
12722 : : "counter are not configured!");
12723 : : else
12724 : 0 : handle = (struct rte_flow_action_handle *)
12725 : 0 : (uintptr_t)cnt_id;
12726 : : break;
12727 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
12728 : : aso = true;
12729 : 0 : handle = flow_hw_conntrack_create(dev, queue, action->conf, job,
12730 : : push, error);
12731 : 0 : break;
12732 [ # # ]: 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
12733 : : aso = true;
12734 : : ret = flow_hw_meter_mark_alloc(dev, queue, action, job, push, &aso_mtr, error);
12735 [ # # ]: 0 : if (ret) {
12736 [ # # ]: 0 : if (ret != -EIO) {
12737 [ # # ]: 0 : if (queue == MLX5_HW_INV_QUEUE)
12738 : 0 : queue = CTRL_QUEUE_ID(priv);
12739 : : flow_hw_job_put(priv, job, queue);
12740 : : }
12741 : : break;
12742 : : }
12743 : 0 : handle = (void *)(uintptr_t)job->action;
12744 : 0 : break;
12745 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
12746 : 0 : handle = mlx5_flow_dv_action_create(dev, conf, action, error);
12747 : 0 : break;
12748 : 0 : case RTE_FLOW_ACTION_TYPE_QUOTA:
12749 : : aso = true;
12750 : 0 : handle = mlx5_quota_alloc(dev, queue, action->conf,
12751 : : job, push, error);
12752 : 0 : break;
12753 : 0 : default:
12754 : 0 : rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12755 : : NULL, "action type not supported");
12756 : 0 : break;
12757 : : }
12758 [ # # # # ]: 0 : if (job && (!force_job || handle)) {
12759 [ # # ]: 0 : job->action = handle;
12760 : : flow_hw_action_finalize(dev, queue, job, push, aso,
12761 : : handle != NULL);
12762 : : }
12763 : : return handle;
12764 : : }
12765 : :
12766 : : static int
12767 : 0 : mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue,
12768 : : const struct rte_flow_update_meter_mark *upd_meter_mark,
12769 : : uint32_t idx, bool push,
12770 : : struct mlx5_hw_q_job *job, struct rte_flow_error *error)
12771 : : {
12772 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12773 : 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
12774 : : const struct rte_flow_action_meter_mark *meter_mark = &upd_meter_mark->meter_mark;
12775 : 0 : struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
12776 : : struct mlx5_flow_meter_info *fm;
12777 : :
12778 [ # # ]: 0 : if (!aso_mtr)
12779 : 0 : return rte_flow_error_set(error, EINVAL,
12780 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12781 : : NULL, "Invalid meter_mark update index");
12782 : : fm = &aso_mtr->fm;
12783 [ # # ]: 0 : if (upd_meter_mark->profile_valid)
12784 : 0 : fm->profile = (struct mlx5_flow_meter_profile *)
12785 : 0 : (meter_mark->profile);
12786 [ # # ]: 0 : if (upd_meter_mark->color_mode_valid)
12787 : 0 : fm->color_aware = meter_mark->color_mode;
12788 [ # # ]: 0 : if (upd_meter_mark->state_valid)
12789 : 0 : fm->is_enable = meter_mark->state;
12790 [ # # ]: 0 : aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ?
12791 : : ASO_METER_WAIT : ASO_METER_WAIT_ASYNC;
12792 : : /* Update ASO flow meter by wqe. */
12793 [ # # ]: 0 : if (mlx5_aso_meter_update_by_wqe(priv, queue,
12794 : : aso_mtr, &priv->mtr_bulk, job, push))
12795 : 0 : return rte_flow_error_set(error, EINVAL,
12796 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12797 : : NULL, "Unable to update ASO meter WQE");
12798 : : /* Wait for ASO object completion. */
12799 [ # # # # ]: 0 : if (queue == MLX5_HW_INV_QUEUE &&
12800 : 0 : mlx5_aso_mtr_wait(priv, aso_mtr, true))
12801 : 0 : return rte_flow_error_set(error, EINVAL,
12802 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12803 : : NULL, "Unable to wait for ASO meter CQE");
12804 : : return 0;
12805 : : }
12806 : :
12807 : : /**
12808 : : * Update shared action.
12809 : : *
12810 : : * @param[in] dev
12811 : : * Pointer to the rte_eth_dev structure.
12812 : : * @param[in] queue
12813 : : * Which queue to be used.
12814 : : * @param[in] attr
12815 : : * Operation attribute.
12816 : : * @param[in] handle
12817 : : * Action handle to be updated.
12818 : : * @param[in] update
12819 : : * Update value.
12820 : : * @param[in] user_data
12821 : : * Pointer to the user_data.
12822 : : * @param[out] error
12823 : : * Pointer to error structure.
12824 : : *
12825 : : * @return
12826 : : * 0 on success, negative value otherwise and rte_errno is set.
12827 : : */
12828 : : static int
12829 : 0 : flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
12830 : : const struct rte_flow_op_attr *attr,
12831 : : struct rte_flow_action_handle *handle,
12832 : : const void *update,
12833 : : void *user_data,
12834 : : struct rte_flow_error *error)
12835 : : {
12836 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12837 : : const struct rte_flow_modify_conntrack *ct_conf =
12838 : : (const struct rte_flow_modify_conntrack *)update;
12839 : : struct mlx5_hw_q_job *job = NULL;
12840 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12841 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12842 [ # # ]: 0 : uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
12843 : : int ret = 0;
12844 : : bool push = flow_hw_action_push(attr);
12845 : : bool aso = false;
12846 : 0 : bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
12847 : :
12848 [ # # ]: 0 : if (attr || force_job) {
12849 : : job = flow_hw_action_job_init(priv, queue, handle, user_data,
12850 : : NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
12851 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12852 : : if (!job)
12853 : 0 : return -rte_errno;
12854 : : }
12855 [ # # # # : 0 : switch (type) {
# # ]
12856 : 0 : case MLX5_INDIRECT_ACTION_TYPE_AGE:
12857 : 0 : ret = mlx5_hws_age_action_update(priv, idx, update, error);
12858 : 0 : break;
12859 : 0 : case MLX5_INDIRECT_ACTION_TYPE_CT:
12860 [ # # ]: 0 : if (ct_conf->state)
12861 : : aso = true;
12862 : 0 : ret = flow_hw_conntrack_update(dev, queue, update, idx,
12863 : : job, push, error);
12864 : 0 : break;
12865 : 0 : case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
12866 : : aso = true;
12867 : 0 : ret = mlx5_flow_update_meter_mark(dev, queue, update, idx, push,
12868 : : job, error);
12869 : 0 : break;
12870 : 0 : case MLX5_INDIRECT_ACTION_TYPE_RSS:
12871 : 0 : ret = mlx5_flow_dv_action_update(dev, handle, update, error);
12872 : 0 : break;
12873 : 0 : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12874 : : aso = true;
12875 : 0 : ret = mlx5_quota_query_update(dev, queue, handle, update, NULL,
12876 : : job, push, error);
12877 : 0 : break;
12878 : 0 : default:
12879 : : ret = -ENOTSUP;
12880 : 0 : rte_flow_error_set(error, ENOTSUP,
12881 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12882 : : "action type not supported");
12883 : 0 : break;
12884 : : }
12885 [ # # ]: 0 : if (job && !force_job)
12886 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
12887 : : return ret;
12888 : : }
12889 : :
12890 : : /**
12891 : : * Destroy shared action.
12892 : : *
12893 : : * @param[in] dev
12894 : : * Pointer to the rte_eth_dev structure.
12895 : : * @param[in] queue
12896 : : * Which queue to be used.
12897 : : * @param[in] attr
12898 : : * Operation attribute.
12899 : : * @param[in] handle
12900 : : * Action handle to be destroyed.
12901 : : * @param[in] user_data
12902 : : * Pointer to the user_data.
12903 : : * @param[out] error
12904 : : * Pointer to error structure.
12905 : : *
12906 : : * @return
12907 : : * 0 on success, negative value otherwise and rte_errno is set.
12908 : : */
12909 : : static int
12910 : 0 : flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
12911 : : const struct rte_flow_op_attr *attr,
12912 : : struct rte_flow_action_handle *handle,
12913 : : void *user_data,
12914 : : struct rte_flow_error *error)
12915 : : {
12916 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)handle;
12917 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
12918 : 0 : uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
12919 : : uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
12920 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
12921 [ # # ]: 0 : struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
12922 : : struct mlx5_hw_q_job *job = NULL;
12923 : : struct mlx5_aso_mtr *aso_mtr;
12924 : : struct mlx5_flow_meter_info *fm;
12925 : : bool push = flow_hw_action_push(attr);
12926 : : bool aso = false;
12927 : : int ret = 0;
12928 : 0 : bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
12929 : :
12930 [ # # ]: 0 : if (attr || force_job) {
12931 : : job = flow_hw_action_job_init(priv, queue, handle, user_data,
12932 : : NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
12933 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
12934 : : if (!job)
12935 : 0 : return -rte_errno;
12936 : : }
12937 [ # # # # : 0 : switch (type) {
# # # ]
12938 : 0 : case MLX5_INDIRECT_ACTION_TYPE_AGE:
12939 : 0 : ret = mlx5_hws_age_action_destroy(priv, age_idx, error);
12940 : 0 : break;
12941 : 0 : case MLX5_INDIRECT_ACTION_TYPE_COUNT:
12942 [ # # ]: 0 : age_idx = mlx5_hws_cnt_age_get(priv->hws_cpool, act_idx);
12943 [ # # ]: 0 : if (age_idx != 0)
12944 : : /*
12945 : : * If this counter belongs to indirect AGE, here is the
12946 : : * time to update the AGE.
12947 : : */
12948 : : mlx5_hws_age_nb_cnt_decrease(priv, age_idx);
12949 [ # # ]: 0 : mlx5_hws_cnt_shared_put(priv->hws_cpool, &act_idx);
12950 : : break;
12951 : 0 : case MLX5_INDIRECT_ACTION_TYPE_CT:
12952 : 0 : ret = flow_hw_conntrack_destroy(dev, idx, error);
12953 : 0 : break;
12954 : 0 : case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
12955 : 0 : aso_mtr = mlx5_ipool_get(pool->idx_pool, idx);
12956 [ # # ]: 0 : if (!aso_mtr) {
12957 : : ret = -EINVAL;
12958 : 0 : rte_flow_error_set(error, EINVAL,
12959 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12960 : : NULL, "Invalid meter_mark destroy index");
12961 : 0 : break;
12962 : : }
12963 : : fm = &aso_mtr->fm;
12964 : 0 : fm->is_enable = 0;
12965 : : /* Update ASO flow meter by wqe. */
12966 [ # # ]: 0 : if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr,
12967 : : &priv->mtr_bulk, job, push)) {
12968 : : ret = -EINVAL;
12969 : 0 : rte_flow_error_set(error, EINVAL,
12970 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12971 : : NULL, "Unable to update ASO meter WQE");
12972 : 0 : break;
12973 : : }
12974 : : /* Wait for ASO object completion. */
12975 [ # # ]: 0 : if (queue == MLX5_HW_INV_QUEUE) {
12976 [ # # ]: 0 : if (mlx5_aso_mtr_wait(priv, aso_mtr, true)) {
12977 : : ret = -EINVAL;
12978 : 0 : rte_flow_error_set(error, EINVAL,
12979 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12980 : : NULL, "Unable to wait for ASO meter CQE");
12981 : : }
12982 : 0 : mlx5_ipool_free(pool->idx_pool, idx);
12983 [ # # ]: 0 : if (ret < 0)
12984 : : break;
12985 : : }
12986 : : aso = true;
12987 : : break;
12988 : 0 : case MLX5_INDIRECT_ACTION_TYPE_RSS:
12989 : 0 : ret = mlx5_flow_dv_action_destroy(dev, handle, error);
12990 : 0 : break;
12991 : : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
12992 : : break;
12993 : 0 : default:
12994 : : ret = -ENOTSUP;
12995 : 0 : rte_flow_error_set(error, ENOTSUP,
12996 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12997 : : "action type not supported");
12998 : 0 : break;
12999 : : }
13000 [ # # ]: 0 : if (job && !force_job)
13001 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
13002 : : return ret;
13003 : : }
13004 : :
13005 : : static int
13006 : 0 : flow_hw_query_counter(const struct rte_eth_dev *dev, uint32_t counter,
13007 : : void *data, struct rte_flow_error *error)
13008 : : {
13009 : : struct mlx5_hws_cnt_pool *hpool;
13010 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
13011 : : struct mlx5_hws_cnt *cnt;
13012 : : struct rte_flow_query_count *qc = data;
13013 : : uint32_t iidx;
13014 : : uint64_t pkts, bytes;
13015 : :
13016 [ # # ]: 0 : if (!mlx5_hws_cnt_id_valid(counter))
13017 : 0 : return rte_flow_error_set(error, EINVAL,
13018 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13019 : : "counter are not available");
13020 [ # # ]: 0 : hpool = mlx5_hws_cnt_host_pool(priv->hws_cpool);
13021 : : iidx = mlx5_hws_cnt_iidx(hpool, counter);
13022 : 0 : cnt = &hpool->pool[iidx];
13023 : : __hws_cnt_query_raw(priv->hws_cpool, counter, &pkts, &bytes);
13024 : 0 : qc->hits_set = 1;
13025 : 0 : qc->bytes_set = 1;
13026 : 0 : qc->hits = pkts - cnt->reset.hits;
13027 : 0 : qc->bytes = bytes - cnt->reset.bytes;
13028 [ # # ]: 0 : if (qc->reset) {
13029 : 0 : cnt->reset.bytes = bytes;
13030 : 0 : cnt->reset.hits = pkts;
13031 : : }
13032 : : return 0;
13033 : : }
13034 : :
13035 : : /**
13036 : : * Query a flow rule AGE action for aging information.
13037 : : *
13038 : : * @param[in] dev
13039 : : * Pointer to Ethernet device.
13040 : : * @param[in] age_idx
13041 : : * Index of AGE action parameter.
13042 : : * @param[out] data
13043 : : * Data retrieved by the query.
13044 : : * @param[out] error
13045 : : * Perform verbose error reporting if not NULL.
13046 : : *
13047 : : * @return
13048 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
13049 : : */
13050 : : static int
13051 : 0 : flow_hw_query_age(const struct rte_eth_dev *dev, uint32_t age_idx, void *data,
13052 : : struct rte_flow_error *error)
13053 : : {
13054 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13055 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
13056 : 0 : struct mlx5_indexed_pool *ipool = age_info->ages_ipool;
13057 : 0 : struct mlx5_hws_age_param *param = mlx5_ipool_get(ipool, age_idx);
13058 : : struct rte_flow_query_age *resp = data;
13059 : :
13060 [ # # # # ]: 0 : if (!param || !param->timeout)
13061 : 0 : return rte_flow_error_set(error, EINVAL,
13062 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13063 : : NULL, "age data not available");
13064 [ # # # ]: 0 : switch (rte_atomic_load_explicit(¶m->state, rte_memory_order_relaxed)) {
13065 : 0 : case HWS_AGE_AGED_OUT_REPORTED:
13066 : : case HWS_AGE_AGED_OUT_NOT_REPORTED:
13067 : 0 : resp->aged = 1;
13068 : 0 : break;
13069 : 0 : case HWS_AGE_CANDIDATE:
13070 : : case HWS_AGE_CANDIDATE_INSIDE_RING:
13071 : 0 : resp->aged = 0;
13072 : 0 : break;
13073 : : case HWS_AGE_FREE:
13074 : : /*
13075 : : * When state is FREE the flow itself should be invalid.
13076 : : * Fall-through.
13077 : : */
13078 : : default:
13079 : : MLX5_ASSERT(0);
13080 : : break;
13081 : : }
13082 : 0 : resp->sec_since_last_hit_valid = !resp->aged;
13083 [ # # ]: 0 : if (resp->sec_since_last_hit_valid)
13084 : 0 : resp->sec_since_last_hit = rte_atomic_load_explicit
13085 : : (¶m->sec_since_last_hit, rte_memory_order_relaxed);
13086 : : return 0;
13087 : : }
13088 : :
13089 : : static int
13090 : 0 : flow_hw_query(struct rte_eth_dev *dev, struct rte_flow *flow,
13091 : : const struct rte_flow_action *actions, void *data,
13092 : : struct rte_flow_error *error)
13093 : : {
13094 : : int ret = -EINVAL;
13095 : : struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
13096 : : struct rte_flow_hw_aux *aux;
13097 : :
13098 [ # # ]: 0 : for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
13099 [ # # # # ]: 0 : switch (actions->type) {
13100 : : case RTE_FLOW_ACTION_TYPE_VOID:
13101 : : break;
13102 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
13103 [ # # ]: 0 : if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_CNT_ID))
13104 : 0 : return rte_flow_error_set(error, EINVAL,
13105 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13106 : : "counter not defined in the rule");
13107 : 0 : ret = flow_hw_query_counter(dev, hw_flow->cnt_id, data,
13108 : : error);
13109 : 0 : break;
13110 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
13111 [ # # ]: 0 : if (!(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX))
13112 : 0 : return rte_flow_error_set(error, EINVAL,
13113 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13114 : : "age data not available");
13115 [ # # ]: 0 : aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
13116 : 0 : ret = flow_hw_query_age(dev, mlx5_flow_hw_aux_get_age_idx(hw_flow, aux),
13117 : : data, error);
13118 : 0 : break;
13119 : 0 : default:
13120 : 0 : return rte_flow_error_set(error, ENOTSUP,
13121 : : RTE_FLOW_ERROR_TYPE_ACTION,
13122 : : actions,
13123 : : "action not supported");
13124 : : }
13125 : : }
13126 : : return ret;
13127 : : }
13128 : :
13129 : : /**
13130 : : * Validate indirect action.
13131 : : *
13132 : : * @param[in] dev
13133 : : * Pointer to the Ethernet device structure.
13134 : : * @param[in] conf
13135 : : * Shared action configuration.
13136 : : * @param[in] action
13137 : : * Action specification used to create indirect action.
13138 : : * @param[out] error
13139 : : * Perform verbose error reporting if not NULL. Initialized in case of
13140 : : * error only.
13141 : : *
13142 : : * @return
13143 : : * 0 on success, otherwise negative errno value.
13144 : : */
13145 : : static int
13146 : 0 : flow_hw_action_validate(struct rte_eth_dev *dev,
13147 : : const struct rte_flow_indir_action_conf *conf,
13148 : : const struct rte_flow_action *action,
13149 : : struct rte_flow_error *err)
13150 : : {
13151 : 0 : struct rte_flow_error shadow_error = {0, };
13152 : :
13153 [ # # ]: 0 : if (!err)
13154 : : err = &shadow_error;
13155 : 0 : return flow_hw_action_handle_validate(dev, MLX5_HW_INV_QUEUE, NULL,
13156 : : conf, action, NULL, err);
13157 : : }
13158 : :
13159 : : /**
13160 : : * Create indirect action.
13161 : : *
13162 : : * @param[in] dev
13163 : : * Pointer to the Ethernet device structure.
13164 : : * @param[in] conf
13165 : : * Shared action configuration.
13166 : : * @param[in] action
13167 : : * Action specification used to create indirect action.
13168 : : * @param[out] error
13169 : : * Perform verbose error reporting if not NULL. Initialized in case of
13170 : : * error only.
13171 : : *
13172 : : * @return
13173 : : * A valid shared action handle in case of success, NULL otherwise and
13174 : : * rte_errno is set.
13175 : : */
13176 : : static struct rte_flow_action_handle *
13177 : 0 : flow_hw_action_create(struct rte_eth_dev *dev,
13178 : : const struct rte_flow_indir_action_conf *conf,
13179 : : const struct rte_flow_action *action,
13180 : : struct rte_flow_error *err)
13181 : : {
13182 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13183 : :
13184 [ # # # # ]: 0 : if (action->type == RTE_FLOW_ACTION_TYPE_AGE && priv->hws_strict_queue) {
13185 : 0 : rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_STATE, NULL,
13186 : : "Cannot create age action synchronously with strict queueing");
13187 : 0 : return NULL;
13188 : : }
13189 : :
13190 : 0 : return flow_hw_action_handle_create(dev, MLX5_HW_INV_QUEUE,
13191 : : NULL, conf, action, NULL, err);
13192 : : }
13193 : :
13194 : : /**
13195 : : * Destroy the indirect action.
13196 : : * Release action related resources on the NIC and the memory.
13197 : : * Lock free, (mutex should be acquired by caller).
13198 : : * Dispatcher for action type specific call.
13199 : : *
13200 : : * @param[in] dev
13201 : : * Pointer to the Ethernet device structure.
13202 : : * @param[in] handle
13203 : : * The indirect action object handle to be removed.
13204 : : * @param[out] error
13205 : : * Perform verbose error reporting if not NULL. Initialized in case of
13206 : : * error only.
13207 : : *
13208 : : * @return
13209 : : * 0 on success, otherwise negative errno value.
13210 : : */
13211 : : static int
13212 : 0 : flow_hw_action_destroy(struct rte_eth_dev *dev,
13213 : : struct rte_flow_action_handle *handle,
13214 : : struct rte_flow_error *error)
13215 : : {
13216 : 0 : return flow_hw_action_handle_destroy(dev, MLX5_HW_INV_QUEUE,
13217 : : NULL, handle, NULL, error);
13218 : : }
13219 : :
13220 : : /**
13221 : : * Updates in place shared action configuration.
13222 : : *
13223 : : * @param[in] dev
13224 : : * Pointer to the Ethernet device structure.
13225 : : * @param[in] handle
13226 : : * The indirect action object handle to be updated.
13227 : : * @param[in] update
13228 : : * Action specification used to modify the action pointed by *handle*.
13229 : : * *update* could be of same type with the action pointed by the *handle*
13230 : : * handle argument, or some other structures like a wrapper, depending on
13231 : : * the indirect action type.
13232 : : * @param[out] error
13233 : : * Perform verbose error reporting if not NULL. Initialized in case of
13234 : : * error only.
13235 : : *
13236 : : * @return
13237 : : * 0 on success, otherwise negative errno value.
13238 : : */
13239 : : static int
13240 : 0 : flow_hw_action_update(struct rte_eth_dev *dev,
13241 : : struct rte_flow_action_handle *handle,
13242 : : const void *update,
13243 : : struct rte_flow_error *err)
13244 : : {
13245 : 0 : return flow_hw_action_handle_update(dev, MLX5_HW_INV_QUEUE,
13246 : : NULL, handle, update, NULL, err);
13247 : : }
13248 : :
13249 : : static int
13250 : 0 : flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
13251 : : const struct rte_flow_op_attr *attr,
13252 : : const struct rte_flow_action_handle *handle,
13253 : : void *data, void *user_data,
13254 : : struct rte_flow_error *error)
13255 : : {
13256 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13257 : : struct mlx5_hw_q_job *job = NULL;
13258 : 0 : uint32_t act_idx = (uint32_t)(uintptr_t)handle;
13259 : 0 : uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
13260 [ # # ]: 0 : uint32_t idx = MLX5_INDIRECT_ACTION_IDX_GET(handle);
13261 : : uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
13262 : : int ret;
13263 : : bool push = flow_hw_action_push(attr);
13264 : : bool aso = false;
13265 : :
13266 [ # # ]: 0 : if (attr) {
13267 : : job = flow_hw_action_job_init(priv, queue, handle, user_data,
13268 : : data, MLX5_HW_Q_JOB_TYPE_QUERY,
13269 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
13270 : : if (!job)
13271 : 0 : return -rte_errno;
13272 : : }
13273 [ # # # # : 0 : switch (type) {
# ]
13274 : 0 : case MLX5_INDIRECT_ACTION_TYPE_AGE:
13275 : 0 : ret = flow_hw_query_age(dev, age_idx, data, error);
13276 : 0 : break;
13277 : 0 : case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13278 : 0 : ret = flow_hw_query_counter(dev, act_idx, data, error);
13279 : 0 : break;
13280 : 0 : case MLX5_INDIRECT_ACTION_TYPE_CT:
13281 : : aso = true;
13282 [ # # ]: 0 : if (job)
13283 : 0 : job->query.user = data;
13284 : 0 : ret = flow_hw_conntrack_query(dev, queue, idx, data,
13285 : : job, push, error);
13286 : 0 : break;
13287 : 0 : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
13288 : : aso = true;
13289 : 0 : ret = mlx5_quota_query(dev, queue, handle, data,
13290 : : job, push, error);
13291 : 0 : break;
13292 : 0 : default:
13293 : : ret = -ENOTSUP;
13294 : 0 : rte_flow_error_set(error, ENOTSUP,
13295 : : RTE_FLOW_ERROR_TYPE_ACTION, NULL,
13296 : : "action type not supported");
13297 : 0 : break;
13298 : : }
13299 [ # # ]: 0 : if (job)
13300 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
13301 : : return ret;
13302 : : }
13303 : :
13304 : : static int
13305 : 0 : flow_hw_async_action_handle_query_update
13306 : : (struct rte_eth_dev *dev, uint32_t queue,
13307 : : const struct rte_flow_op_attr *attr,
13308 : : struct rte_flow_action_handle *handle,
13309 : : const void *update, void *query,
13310 : : enum rte_flow_query_update_mode qu_mode,
13311 : : void *user_data, struct rte_flow_error *error)
13312 : : {
13313 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
13314 : : bool push = flow_hw_action_push(attr);
13315 : : bool aso = false;
13316 : : struct mlx5_hw_q_job *job = NULL;
13317 : : int ret = 0;
13318 : :
13319 [ # # ]: 0 : if (attr) {
13320 : : job = flow_hw_action_job_init(priv, queue, handle, user_data,
13321 : : query,
13322 : : MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
13323 : : MLX5_HW_INDIRECT_TYPE_LEGACY, error);
13324 : : if (!job)
13325 : 0 : return -rte_errno;
13326 : : }
13327 [ # # ]: 0 : switch (MLX5_INDIRECT_ACTION_TYPE_GET(handle)) {
13328 : 0 : case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
13329 [ # # ]: 0 : if (qu_mode != RTE_FLOW_QU_QUERY_FIRST) {
13330 : 0 : ret = rte_flow_error_set
13331 : : (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
13332 : : NULL, "quota action must query before update");
13333 : 0 : break;
13334 : : }
13335 : : aso = true;
13336 : 0 : ret = mlx5_quota_query_update(dev, queue, handle,
13337 : : update, query, job, push, error);
13338 : 0 : break;
13339 : 0 : default:
13340 : 0 : ret = rte_flow_error_set(error, ENOTSUP,
13341 : : RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "update and query not supportred");
13342 : : }
13343 [ # # ]: 0 : if (job)
13344 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
13345 : : return ret;
13346 : : }
13347 : :
13348 : : static int
13349 : 0 : flow_hw_action_query(struct rte_eth_dev *dev,
13350 : : const struct rte_flow_action_handle *handle, void *data,
13351 : : struct rte_flow_error *error)
13352 : : {
13353 : 0 : return flow_hw_action_handle_query(dev, MLX5_HW_INV_QUEUE, NULL,
13354 : : handle, data, NULL, error);
13355 : : }
13356 : :
13357 : : static int
13358 : 0 : flow_hw_action_query_update(struct rte_eth_dev *dev,
13359 : : struct rte_flow_action_handle *handle,
13360 : : const void *update, void *query,
13361 : : enum rte_flow_query_update_mode qu_mode,
13362 : : struct rte_flow_error *error)
13363 : : {
13364 : 0 : return flow_hw_async_action_handle_query_update(dev, MLX5_HW_INV_QUEUE,
13365 : : NULL, handle, update,
13366 : : query, qu_mode, NULL,
13367 : : error);
13368 : : }
13369 : :
13370 : : /**
13371 : : * Get aged-out flows of a given port on the given HWS flow queue.
13372 : : *
13373 : : * @param[in] dev
13374 : : * Pointer to the Ethernet device structure.
13375 : : * @param[in] queue_id
13376 : : * Flow queue to query. Ignored when RTE_FLOW_PORT_FLAG_STRICT_QUEUE not set.
13377 : : * @param[in, out] contexts
13378 : : * The address of an array of pointers to the aged-out flows contexts.
13379 : : * @param[in] nb_contexts
13380 : : * The length of context array pointers.
13381 : : * @param[out] error
13382 : : * Perform verbose error reporting if not NULL. Initialized in case of
13383 : : * error only.
13384 : : *
13385 : : * @return
13386 : : * if nb_contexts is 0, return the amount of all aged contexts.
13387 : : * if nb_contexts is not 0 , return the amount of aged flows reported
13388 : : * in the context array, otherwise negative errno value.
13389 : : */
13390 : : static int
13391 : 0 : flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id,
13392 : : void **contexts, uint32_t nb_contexts,
13393 : : struct rte_flow_error *error)
13394 : : {
13395 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13396 : 0 : struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv);
13397 : : struct rte_ring *r;
13398 : : int nb_flows = 0;
13399 : :
13400 [ # # ]: 0 : if (nb_contexts && !contexts)
13401 : 0 : return rte_flow_error_set(error, EINVAL,
13402 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13403 : : NULL, "empty context");
13404 [ # # ]: 0 : if (!priv->hws_age_req)
13405 : 0 : return rte_flow_error_set(error, ENOENT,
13406 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13407 : : NULL, "No aging initialized");
13408 [ # # ]: 0 : if (priv->hws_strict_queue) {
13409 : : /* Queue is invalid in sync query. Sync query and strict queueing is disallowed. */
13410 : : MLX5_ASSERT(queue_id != MLX5_HW_INV_QUEUE);
13411 [ # # ]: 0 : if (queue_id >= age_info->hw_q_age->nb_rings)
13412 : 0 : return rte_flow_error_set(error, EINVAL,
13413 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13414 : : NULL, "invalid queue id");
13415 : 0 : r = age_info->hw_q_age->aged_lists[queue_id];
13416 : : } else {
13417 : 0 : r = age_info->hw_age.aged_list;
13418 : 0 : MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13419 : : }
13420 [ # # ]: 0 : if (nb_contexts == 0)
13421 : 0 : return rte_ring_count(r);
13422 [ # # ]: 0 : while ((uint32_t)nb_flows < nb_contexts) {
13423 : : uint32_t age_idx;
13424 : :
13425 : : if (rte_ring_dequeue_elem(r, &age_idx, sizeof(uint32_t)) < 0)
13426 : : break;
13427 : : /* get the AGE context if the aged-out index is still valid. */
13428 : 0 : contexts[nb_flows] = mlx5_hws_age_context_get(priv, age_idx);
13429 [ # # ]: 0 : if (!contexts[nb_flows])
13430 : 0 : continue;
13431 : 0 : nb_flows++;
13432 : : }
13433 : : return nb_flows;
13434 : : }
13435 : :
13436 : : /**
13437 : : * Get aged-out flows.
13438 : : *
13439 : : * This function is relevant only if RTE_FLOW_PORT_FLAG_STRICT_QUEUE isn't set.
13440 : : *
13441 : : * @param[in] dev
13442 : : * Pointer to the Ethernet device structure.
13443 : : * @param[in] contexts
13444 : : * The address of an array of pointers to the aged-out flows contexts.
13445 : : * @param[in] nb_contexts
13446 : : * The length of context array pointers.
13447 : : * @param[out] error
13448 : : * Perform verbose error reporting if not NULL. Initialized in case of
13449 : : * error only.
13450 : : *
13451 : : * @return
13452 : : * how many contexts get in success, otherwise negative errno value.
13453 : : * if nb_contexts is 0, return the amount of all aged contexts.
13454 : : * if nb_contexts is not 0 , return the amount of aged flows reported
13455 : : * in the context array.
13456 : : */
13457 : : static int
13458 : 0 : flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
13459 : : uint32_t nb_contexts, struct rte_flow_error *error)
13460 : : {
13461 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13462 : :
13463 [ # # ]: 0 : if (priv->hws_strict_queue)
13464 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_STATE, NULL,
13465 : : "Cannot get aged flows synchronously with strict queueing");
13466 : :
13467 : 0 : return flow_hw_get_q_aged_flows(dev, MLX5_HW_INV_QUEUE, contexts, nb_contexts, error);
13468 : : }
13469 : : /**
13470 : : * Initialization function for non template API which calls
13471 : : * flow_hw_configure with default values.
13472 : : * Configure non queues cause 1 queue is configured by default for inner usage.
13473 : : *
13474 : : * @param[in] dev
13475 : : * Pointer to the Ethernet device structure.
13476 : : * @param[out] error
13477 : : * Pointer to the error structure.
13478 : : *
13479 : : * @return
13480 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
13481 : : */
13482 : : int
13483 : 0 : mlx5_flow_hw_init(struct rte_eth_dev *dev,
13484 : : struct rte_flow_error *error)
13485 : : {
13486 : 0 : const struct rte_flow_port_attr port_attr = {0};
13487 : 0 : const struct rte_flow_queue_attr queue_attr = {.size = MLX5_NT_DEFAULT_QUEUE_SIZE};
13488 : 0 : const struct rte_flow_queue_attr *attr_list = &queue_attr;
13489 : :
13490 : : /**
13491 : : * If user uses template and non template API:
13492 : : * User will call flow_hw_configure and non template
13493 : : * API will use the allocated actions.
13494 : : * Init function will not call flow_hw_configure.
13495 : : *
13496 : : * If user uses only non template API's:
13497 : : * Init function will call flow_hw_configure.
13498 : : * It will not allocate memory for actions.
13499 : : * When needed allocation, it will handle same as for SWS today,
13500 : : * meaning using bulk allocations and resize as needed.
13501 : : */
13502 : : /* Configure hws with default values. */
13503 : 0 : DRV_LOG(DEBUG, "Apply default configuration, zero number of queues, inner control queue size is %u",
13504 : : MLX5_NT_DEFAULT_QUEUE_SIZE);
13505 : 0 : return __flow_hw_configure(dev, &port_attr, 0, &attr_list, true, error);
13506 : : }
13507 : :
13508 : 0 : static int flow_hw_prepare(struct rte_eth_dev *dev,
13509 : : const struct rte_flow_action actions[] __rte_unused,
13510 : : enum mlx5_flow_type type,
13511 : : struct rte_flow_hw **flow,
13512 : : struct rte_flow_error *error)
13513 : : {
13514 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13515 : 0 : uint32_t idx = 0;
13516 : :
13517 : : /*
13518 : : * Notice pool idx size = (sizeof(struct rte_flow_hw)
13519 : : * + sizeof(struct rte_flow_nt2hws)) for HWS mode.
13520 : : */
13521 : 0 : *flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
13522 [ # # ]: 0 : if (!(*flow))
13523 : 0 : return rte_flow_error_set(error, ENOMEM,
13524 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13525 : : "cannot allocate flow memory");
13526 : : /* Allocating 2 structures in one pool slot, updating nt2hw pointer.*/
13527 : 0 : (*flow)->nt2hws = (struct rte_flow_nt2hws *)
13528 : 0 : ((uintptr_t)(*flow) + sizeof(struct rte_flow_hw));
13529 : 0 : (*flow)->idx = idx;
13530 : 0 : (*flow)->nt2hws->flow_aux = (struct rte_flow_hw_aux *)
13531 : 0 : ((uintptr_t)((*flow)->nt2hws) + sizeof(struct rte_flow_nt2hws));
13532 : :
13533 [ # # ]: 0 : if (!(*flow)->nt2hws->flow_aux)
13534 : 0 : return rte_flow_error_set(error, ENOMEM,
13535 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
13536 : : "cannot allocate flow aux memory");
13537 : : return 0;
13538 : : }
13539 : :
13540 : : static inline void
13541 : 0 : flow_hw_set_dv_fields(struct rte_flow_template_table_attr *table_attr, uint32_t fdb_unified_en,
13542 : : bool *root, uint8_t *ft_type, uint64_t *flags)
13543 : : {
13544 [ # # ]: 0 : if (table_attr->flow_attr.transfer)
13545 : 0 : *ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
13546 : : else
13547 : 0 : *ft_type = table_attr->flow_attr.egress ?
13548 : 0 : MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
13549 : : MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
13550 : 0 : uint32_t group = table_attr->flow_attr.group;
13551 : 0 : *root = group ? 0 : 1;
13552 : 0 : *flags = mlx5_hw_act_flag[!!group][get_mlx5dr_table_type(&table_attr->flow_attr,
13553 : : table_attr->specialize,
13554 : : fdb_unified_en)];
13555 : 0 : }
13556 : :
13557 : : static int
13558 : 0 : flow_hw_modify_hdr_resource_register
13559 : : (struct rte_eth_dev *dev,
13560 : : struct rte_flow_template_table *table,
13561 : : struct mlx5_hw_actions *hw_acts,
13562 : : struct rte_flow_hw *dev_flow,
13563 : : struct rte_flow_error *error)
13564 : : {
13565 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
13566 : : bool unified_fdb = is_unified_fdb(priv);
13567 : 0 : struct rte_flow_template_table_attr *table_attr = &table->cfg.attr;
13568 : 0 : struct mlx5_flow_dv_modify_hdr_resource *dv_resource_ptr = NULL;
13569 : : union {
13570 : : struct mlx5_flow_dv_modify_hdr_resource dv_resource;
13571 : : uint8_t data[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
13572 : : sizeof(struct mlx5_modification_cmd) * MLX5_MHDR_MAX_CMD];
13573 : : } dummy;
13574 : : int ret;
13575 : :
13576 [ # # ]: 0 : if (hw_acts->mhdr) {
13577 : 0 : dummy.dv_resource.actions_num = hw_acts->mhdr->mhdr_cmds_num;
13578 : 0 : memcpy(dummy.dv_resource.actions, hw_acts->mhdr->mhdr_cmds,
13579 : 0 : sizeof(struct mlx5_modification_cmd) * dummy.dv_resource.actions_num);
13580 : : } else {
13581 : : return 0;
13582 : : }
13583 : 0 : flow_hw_set_dv_fields(table_attr, unified_fdb,
13584 : : &dummy.dv_resource.root, &dummy.dv_resource.ft_type,
13585 : : &dummy.dv_resource.flags);
13586 : 0 : dummy.dv_resource.flags |= MLX5DR_ACTION_FLAG_SHARED;
13587 : 0 : ret = mlx5_flow_modify_hdr_resource_register(dev, &dummy.dv_resource,
13588 : : &dv_resource_ptr, error);
13589 [ # # ]: 0 : if (ret)
13590 : : return ret;
13591 : : MLX5_ASSERT(dv_resource_ptr);
13592 : 0 : dev_flow->nt2hws->modify_hdr = dv_resource_ptr;
13593 : : /* keep action for the rule construction. */
13594 : 0 : hw_acts->rule_acts[hw_acts->mhdr->pos].action = dv_resource_ptr->action;
13595 : : /* Bulk size is 1, so index is 1. */
13596 : 0 : dev_flow->res_idx = 1;
13597 : 0 : return 0;
13598 : : }
13599 : :
13600 : : static int
13601 : 0 : flow_hw_encap_decap_resource_register
13602 : : (struct rte_eth_dev *dev,
13603 : : struct rte_flow_template_table *table,
13604 : : struct mlx5_hw_actions *hw_acts,
13605 : : struct rte_flow_hw *dev_flow,
13606 : : struct rte_flow_error *error)
13607 : : {
13608 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
13609 : : bool unified_fdb = is_unified_fdb(priv);
13610 : 0 : struct rte_flow_template_table_attr *table_attr = &table->cfg.attr;
13611 : 0 : struct mlx5_flow_dv_encap_decap_resource *dv_resource_ptr = NULL;
13612 : : struct mlx5_flow_dv_encap_decap_resource dv_resource;
13613 : : struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
13614 : : int ret;
13615 : : bool is_root;
13616 : : int ix;
13617 : :
13618 [ # # ]: 0 : if (hw_acts->encap_decap)
13619 : 0 : dv_resource.reformat_type = hw_acts->encap_decap->action_type;
13620 : : else
13621 : : return 0;
13622 : 0 : flow_hw_set_dv_fields(table_attr, unified_fdb, &is_root, &dv_resource.ft_type,
13623 : : &dv_resource.flags);
13624 [ # # ]: 0 : ix = mlx5_bwc_multi_pattern_reformat_to_index((enum mlx5dr_action_type)
13625 : : dv_resource.reformat_type);
13626 : : if (ix < 0)
13627 : 0 : return ix;
13628 [ # # ]: 0 : if (hw_acts->encap_decap->shared) {
13629 : 0 : dv_resource.size = hw_acts->encap_decap->data_size;
13630 : : MLX5_ASSERT(dv_resource.size <= MLX5_ENCAP_MAX_LEN);
13631 : 0 : memcpy(&dv_resource.buf, hw_acts->encap_decap->data, dv_resource.size);
13632 : 0 : dv_resource.flags |= MLX5DR_ACTION_FLAG_SHARED;
13633 : : } else {
13634 : 0 : typeof(mpctx->reformat[0]) *reformat = mpctx->reformat + ix;
13635 [ # # ]: 0 : if (!reformat->elements_num)
13636 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
13637 : : NULL, "No reformat action exist in the table.");
13638 : 0 : dv_resource.size = reformat->reformat_hdr->sz;
13639 : : MLX5_ASSERT(dv_resource.size <= MLX5_ENCAP_MAX_LEN);
13640 : 0 : memcpy(&dv_resource.buf, reformat->reformat_hdr->data, dv_resource.size);
13641 : : }
13642 : 0 : ret = mlx5_flow_encap_decap_resource_register(dev, &dv_resource, is_root,
13643 : : &dv_resource_ptr, error);
13644 [ # # ]: 0 : if (ret)
13645 : : return ret;
13646 : : MLX5_ASSERT(dv_resource_ptr);
13647 : 0 : dev_flow->nt2hws->rix_encap_decap = dv_resource_ptr->idx;
13648 : : /* keep action for the rule construction. */
13649 [ # # ]: 0 : if (hw_acts->encap_decap->shared)
13650 : 0 : hw_acts->rule_acts[hw_acts->encap_decap_pos].action = dv_resource_ptr->action;
13651 : : else
13652 : 0 : mpctx->segments[0].reformat_action[ix] = dv_resource_ptr->action;
13653 : : /* Bulk size is 1, so index is 1. */
13654 : 0 : dev_flow->res_idx = 1;
13655 : 0 : return 0;
13656 : : }
13657 : :
13658 : : static enum rte_flow_action_type
13659 : : flow_nta_get_indirect_action_type(const struct rte_flow_action *action)
13660 : : {
13661 [ # # ]: 0 : switch (MLX5_INDIRECT_ACTION_TYPE_GET(action->conf)) {
13662 : : case MLX5_INDIRECT_ACTION_TYPE_RSS:
13663 : : return RTE_FLOW_ACTION_TYPE_RSS;
13664 : : case MLX5_INDIRECT_ACTION_TYPE_AGE:
13665 : : return RTE_FLOW_ACTION_TYPE_AGE;
13666 : : case MLX5_INDIRECT_ACTION_TYPE_COUNT:
13667 : : return RTE_FLOW_ACTION_TYPE_COUNT;
13668 : : case MLX5_INDIRECT_ACTION_TYPE_CT:
13669 : : return RTE_FLOW_ACTION_TYPE_CONNTRACK;
13670 : : default:
13671 : : break;
13672 : : }
13673 : : return RTE_FLOW_ACTION_TYPE_END;
13674 : : }
13675 : :
13676 : : static void
13677 : : flow_nta_set_mh_mask_conf(const struct rte_flow_action_modify_field *action_conf,
13678 : : struct rte_flow_action_modify_field *mask_conf)
13679 : : {
13680 : : memset(mask_conf, 0xff, sizeof(*mask_conf));
13681 : 0 : mask_conf->operation = action_conf->operation;
13682 : 0 : mask_conf->dst.field = action_conf->dst.field;
13683 : 0 : mask_conf->src.field = action_conf->src.field;
13684 : : }
13685 : :
13686 : : union actions_conf {
13687 : : struct rte_flow_action_modify_field modify_field;
13688 : : struct rte_flow_action_raw_encap raw_encap;
13689 : : struct rte_flow_action_vxlan_encap vxlan_encap;
13690 : : struct rte_flow_action_nvgre_encap nvgre_encap;
13691 : : };
13692 : :
13693 : : static int
13694 : 0 : flow_nta_build_template_mask(const struct rte_flow_action actions[],
13695 : : struct rte_flow_action masks[MLX5_HW_MAX_ACTS],
13696 : : union actions_conf mask_conf[MLX5_HW_MAX_ACTS])
13697 : : {
13698 : : int i;
13699 : :
13700 [ # # # # ]: 0 : for (i = 0; i == 0 || actions[i - 1].type != RTE_FLOW_ACTION_TYPE_END; i++) {
13701 : 0 : const struct rte_flow_action *action = &actions[i];
13702 : 0 : struct rte_flow_action *mask = &masks[i];
13703 : 0 : union actions_conf *conf = &mask_conf[i];
13704 : :
13705 : 0 : mask->type = action->type;
13706 [ # # # # : 0 : switch (action->type) {
# # # ]
13707 : : case RTE_FLOW_ACTION_TYPE_INDIRECT:
13708 : 0 : mask->type = flow_nta_get_indirect_action_type(action);
13709 [ # # ]: 0 : if (!mask->type)
13710 : : return -EINVAL;
13711 : : break;
13712 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
13713 : 0 : flow_nta_set_mh_mask_conf(action->conf, (void *)conf);
13714 : 0 : mask->conf = conf;
13715 : 0 : break;
13716 : : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
13717 : : /* This mask will set this action as shared. */
13718 : : memset(conf, 0xff, sizeof(struct rte_flow_action_raw_encap));
13719 : 0 : mask->conf = conf;
13720 : 0 : break;
13721 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
13722 : : /* This mask will set this action as shared. */
13723 : 0 : conf->vxlan_encap.definition =
13724 : : ((const struct rte_flow_action_vxlan_encap *)
13725 : 0 : action->conf)->definition;
13726 : 0 : mask->conf = conf;
13727 : 0 : break;
13728 : 0 : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
13729 : : /* This mask will set this action as shared. */
13730 : 0 : conf->nvgre_encap.definition =
13731 : : ((const struct rte_flow_action_nvgre_encap *)
13732 : 0 : action->conf)->definition;
13733 : 0 : mask->conf = conf;
13734 : 0 : break;
13735 : : case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
13736 : : memset(conf, 0xff, sizeof(struct rte_flow_action_of_set_vlan_vid));
13737 : 0 : mask->conf = conf;
13738 : 0 : break;
13739 : : default:
13740 : : break;
13741 : : }
13742 : : }
13743 : : return 0;
13744 : : #undef NTA_CHECK_CONF_BUF_SIZE
13745 : : }
13746 : :
13747 : : static int
13748 : 0 : flow_hw_translate_flow_actions(struct rte_eth_dev *dev,
13749 : : const struct rte_flow_attr *attr,
13750 : : const struct rte_flow_action actions[],
13751 : : struct rte_flow_hw *flow,
13752 : : struct mlx5_flow_hw_action_params *ap,
13753 : : struct mlx5_hw_actions *hw_acts,
13754 : : uint64_t item_flags, uint64_t action_flags,
13755 : : bool external,
13756 : : struct rte_flow_error *error)
13757 : : {
13758 : : int ret = 0;
13759 : 0 : uint32_t src_group = 0;
13760 : : enum mlx5dr_table_type table_type;
13761 : : struct mlx5_flow_group grp;
13762 : : struct rte_flow_actions_template *at = NULL;
13763 : 0 : struct rte_flow_actions_template_attr template_attr = {
13764 : 0 : .egress = attr->egress,
13765 : 0 : .ingress = attr->ingress,
13766 : 0 : .transfer = attr->transfer,
13767 : : };
13768 : : struct rte_flow_action masks[MLX5_HW_MAX_ACTS];
13769 : : union actions_conf mask_conf[MLX5_HW_MAX_ACTS];
13770 : :
13771 : : RTE_SET_USED(action_flags);
13772 : : memset(masks, 0, sizeof(masks));
13773 : : memset(mask_conf, 0, sizeof(mask_conf));
13774 : : /* Only set the needed fields explicitly. */
13775 : 0 : struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
13776 : : struct rte_flow_template_table *table;
13777 : :
13778 : : /*
13779 : : * Notice All direct actions will be unmasked,
13780 : : * except for modify header and encap,
13781 : : * and therefore will be parsed as part of action construct.
13782 : : * Modify header is always shared in HWS,
13783 : : * encap is masked such that it will be treated as shared.
13784 : : * shared actions will be parsed as part of template translation
13785 : : * and not during action construct.
13786 : : */
13787 [ # # ]: 0 : if (!wks)
13788 : 0 : return rte_flow_error_set(error, ENOMEM,
13789 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13790 : : NULL,
13791 : : "failed to push flow workspace");
13792 : 0 : table = wks->table;
13793 : 0 : flow_nta_build_template_mask(actions, masks, mask_conf);
13794 : : /* The group in the attribute translation was done in advance. */
13795 : 0 : ret = __translate_group(dev, attr, external, attr->group, &src_group, error);
13796 [ # # ]: 0 : if (ret)
13797 : : return ret;
13798 [ # # ]: 0 : if (attr->transfer)
13799 : : table_type = MLX5DR_TABLE_TYPE_FDB;
13800 [ # # ]: 0 : else if (attr->egress)
13801 : : table_type = MLX5DR_TABLE_TYPE_NIC_TX;
13802 : : else
13803 : : table_type = MLX5DR_TABLE_TYPE_NIC_RX;
13804 : 0 : at = __flow_hw_actions_template_create(dev, &template_attr, actions, masks, true, error);
13805 [ # # ]: 0 : if (!at) {
13806 : 0 : ret = -rte_errno;
13807 : 0 : goto end;
13808 : : }
13809 : 0 : grp.group_id = src_group;
13810 : 0 : table->grp = &grp;
13811 : 0 : table->type = table_type;
13812 : 0 : table->cfg.external = external;
13813 : 0 : table->nb_action_templates = 1;
13814 : 0 : memcpy(&table->cfg.attr.flow_attr, attr, sizeof(*attr));
13815 : 0 : table->cfg.attr.flow_attr.group = src_group;
13816 : 0 : table->ats[0].action_template = at;
13817 : 0 : ret = __flow_hw_translate_actions_template(dev, &table->cfg, hw_acts, at,
13818 : : &table->mpctx, true, error);
13819 [ # # ]: 0 : if (ret)
13820 : 0 : goto end;
13821 : : /* handle bulk actions register. */
13822 : 0 : ret = flow_hw_encap_decap_resource_register(dev, table, hw_acts, flow, error);
13823 [ # # ]: 0 : if (ret)
13824 : 0 : goto end;
13825 : 0 : ret = flow_hw_modify_hdr_resource_register(dev, table, hw_acts, flow, error);
13826 [ # # ]: 0 : if (ret)
13827 : 0 : goto end;
13828 : 0 : table->ats[0].acts = *hw_acts;
13829 : 0 : ret = flow_hw_actions_construct(dev, flow, ap,
13830 : : &table->ats[0], item_flags, table,
13831 [ # # ]: 0 : actions, hw_acts->rule_acts, 0, error);
13832 [ # # ]: 0 : if (ret)
13833 : 0 : goto end;
13834 : 0 : goto end;
13835 : 0 : end:
13836 [ # # ]: 0 : if (ret)
13837 : : /* Make sure that there is no garbage in the actions. */
13838 : 0 : __flow_hw_action_template_destroy(dev, hw_acts);
13839 : : else
13840 : 0 : __flow_hw_act_data_flush(dev, hw_acts);
13841 [ # # ]: 0 : if (at)
13842 : 0 : mlx5_free(at);
13843 : 0 : mlx5_flow_pop_thread_workspace();
13844 : 0 : return ret;
13845 : : }
13846 : :
13847 : : static int
13848 : 0 : flow_hw_unregister_matcher(struct rte_eth_dev *dev,
13849 : : struct mlx5_flow_dv_matcher *matcher)
13850 : : {
13851 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13852 : 0 : struct mlx5_flow_group *group = matcher->group;
13853 : : int ret = 0;
13854 : :
13855 [ # # ]: 0 : if (group) {
13856 [ # # ]: 0 : if (matcher->matcher_object)
13857 : 0 : ret |= mlx5_list_unregister(group->matchers, &matcher->entry);
13858 : 0 : ret |= mlx5_hlist_unregister(priv->sh->groups, &group->entry);
13859 : : }
13860 : 0 : return ret;
13861 : : }
13862 : :
13863 : 0 : static int flow_hw_register_matcher(struct rte_eth_dev *dev,
13864 : : const struct rte_flow_attr *attr,
13865 : : const struct rte_flow_item items[],
13866 : : bool external,
13867 : : struct rte_flow_hw *flow,
13868 : : struct mlx5_flow_dv_matcher *matcher,
13869 : : struct rte_flow_error *error)
13870 : : {
13871 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13872 : 0 : struct rte_flow_error sub_error = {
13873 : : .type = RTE_FLOW_ERROR_TYPE_NONE,
13874 : : .cause = NULL,
13875 : : .message = NULL,
13876 : : };
13877 : 0 : struct rte_flow_attr flow_attr = *attr;
13878 : 0 : uint32_t specialize = 0; /* No unified FDB. */
13879 : 0 : struct mlx5_flow_cb_ctx ctx = {
13880 : : .dev = dev,
13881 : : .error = &sub_error,
13882 : : .data = &flow_attr,
13883 : : .data2 = &specialize,
13884 : : };
13885 : : void *items_ptr = &items;
13886 : 0 : struct mlx5_flow_cb_ctx matcher_ctx = {
13887 : : .error = &sub_error,
13888 : : .data = matcher,
13889 : : .data2 = items_ptr,
13890 : : };
13891 : : struct mlx5_list_entry *group_entry = NULL;
13892 : : struct mlx5_list_entry *matcher_entry = NULL;
13893 : : struct mlx5_flow_dv_matcher *resource;
13894 : : struct mlx5_list *matchers_list;
13895 : : struct mlx5_flow_group *flow_group;
13896 : : int ret;
13897 : :
13898 : :
13899 : 0 : matcher->crc = rte_raw_cksum((const void *)matcher->mask.buf,
13900 : : matcher->mask.size);
13901 : 0 : matcher->priority = attr->priority;
13902 : 0 : ret = __translate_group(dev, attr, external, attr->group, &flow_attr.group, error);
13903 [ # # ]: 0 : if (ret)
13904 : : return ret;
13905 : :
13906 : : /* Register the flow group. */
13907 : 0 : group_entry = mlx5_hlist_register(priv->sh->groups, flow_attr.group, &ctx);
13908 [ # # ]: 0 : if (!group_entry)
13909 : 0 : goto error;
13910 : : flow_group = container_of(group_entry, struct mlx5_flow_group, entry);
13911 : :
13912 : 0 : matchers_list = flow_group->matchers;
13913 : 0 : matcher->group = flow_group;
13914 : 0 : matcher_entry = mlx5_list_register(matchers_list, &matcher_ctx);
13915 [ # # ]: 0 : if (!matcher_entry)
13916 : 0 : goto error;
13917 : : resource = container_of(matcher_entry, typeof(*resource), entry);
13918 : 0 : flow->nt2hws->matcher = resource;
13919 : 0 : return 0;
13920 : :
13921 : 0 : error:
13922 [ # # ]: 0 : if (group_entry)
13923 : 0 : mlx5_hlist_unregister(priv->sh->groups, group_entry);
13924 [ # # ]: 0 : if (error) {
13925 [ # # ]: 0 : if (sub_error.type != RTE_FLOW_ERROR_TYPE_NONE)
13926 : : rte_memcpy(error, &sub_error, sizeof(sub_error));
13927 : : }
13928 : 0 : return rte_flow_error_set(error, rte_errno,
13929 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13930 : : NULL, "fail to register matcher");
13931 : : }
13932 : :
13933 : : static int
13934 : 0 : flow_hw_allocate_actions(struct rte_eth_dev *dev,
13935 : : uint64_t action_flags,
13936 : : struct rte_flow_error *error)
13937 : : {
13938 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
13939 : : int ret;
13940 : : uint obj_num;
13941 : :
13942 : 0 : error->type = RTE_FLOW_ERROR_TYPE_NONE;
13943 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_AGE) {
13944 : : /* If no age objects were previously allocated. */
13945 [ # # ]: 0 : if (!priv->hws_age_req) {
13946 : : /* If no counters were previously allocated. */
13947 [ # # ]: 0 : if (!priv->hws_cpool) {
13948 : 0 : obj_num = MLX5_CNT_NT_MAX(priv);
13949 : 0 : ret = mlx5_hws_cnt_pool_create(dev, obj_num,
13950 : 0 : priv->nb_queue,
13951 : : NULL, error);
13952 [ # # ]: 0 : if (ret)
13953 : 0 : goto err;
13954 : : }
13955 : : /* Allocate same number of counters. */
13956 : 0 : ret = mlx5_hws_age_pool_init(dev, priv->hws_cpool->cfg.request_num,
13957 : 0 : priv->nb_queue, false);
13958 [ # # ]: 0 : if (ret)
13959 : 0 : goto err;
13960 : : }
13961 : : }
13962 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_COUNT) {
13963 : : /* If no counters were previously allocated. */
13964 [ # # ]: 0 : if (!priv->hws_cpool) {
13965 : 0 : obj_num = MLX5_CNT_NT_MAX(priv);
13966 : 0 : ret = mlx5_hws_cnt_pool_create(dev, obj_num,
13967 : 0 : priv->nb_queue, NULL,
13968 : : error);
13969 [ # # ]: 0 : if (ret)
13970 : 0 : goto err;
13971 : : }
13972 : : }
13973 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_CT) {
13974 : : /* If no CT were previously allocated. */
13975 [ # # ]: 0 : if (!priv->hws_ctpool) {
13976 : 0 : obj_num = MLX5_CT_NT_MAX(priv);
13977 : 0 : ret = mlx5_flow_ct_init(dev, obj_num, priv->nb_queue);
13978 [ # # ]: 0 : if (ret)
13979 : 0 : goto err;
13980 : : }
13981 : : }
13982 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_METER) {
13983 : : /* If no meters were previously allocated. */
13984 [ # # ]: 0 : if (!priv->hws_mpool) {
13985 : 0 : obj_num = MLX5_MTR_NT_MAX(priv);
13986 : 0 : ret = mlx5_flow_meter_init(dev, obj_num, 0, 0,
13987 : : priv->nb_queue);
13988 [ # # ]: 0 : if (ret)
13989 : 0 : goto err;
13990 : : }
13991 : : }
13992 : : return 0;
13993 : 0 : err:
13994 [ # # ]: 0 : if (ret && error->type != RTE_FLOW_ERROR_TYPE_NONE)
13995 : : return ret;
13996 : 0 : return rte_flow_error_set(error, ret,
13997 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13998 : : NULL, "fail to allocate actions");
13999 : : }
14000 : :
14001 : 0 : static int flow_hw_apply(const struct rte_flow_item items[],
14002 : : struct mlx5dr_rule_action rule_actions[],
14003 : : struct rte_flow_hw *flow,
14004 : : struct rte_flow_error *error)
14005 : : {
14006 : : struct mlx5dr_bwc_rule *rule = NULL;
14007 : :
14008 : 0 : rule = mlx5dr_bwc_rule_create((struct mlx5dr_bwc_matcher *)
14009 : 0 : flow->nt2hws->matcher->matcher_object,
14010 : : items, rule_actions);
14011 : 0 : flow->nt2hws->nt_rule = rule;
14012 [ # # ]: 0 : if (!rule) {
14013 : 0 : return rte_flow_error_set(error, EINVAL,
14014 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14015 : : NULL, "fail to create rte flow");
14016 : : }
14017 : : return 0;
14018 : : }
14019 : :
14020 : : #ifdef HAVE_MLX5_HWS_SUPPORT
14021 : : /**
14022 : : * Create a flow.
14023 : : *
14024 : : * @param[in] dev
14025 : : * Pointer to Ethernet device.
14026 : : * @param[in] type
14027 : : * Flow type.
14028 : : * @param[in] attr
14029 : : * Flow rule attributes.
14030 : : * @param[in] items
14031 : : * Pattern specification (list terminated by the END pattern item).
14032 : : * @param[in] actions
14033 : : * Associated actions (list terminated by the END action).
14034 : : * @param[in] external
14035 : : * This flow rule is created by request external to PMD.
14036 : : * @param[out] flow
14037 : : * Flow pointer
14038 : : * @param[out] error
14039 : : * Perform verbose error reporting if not NULL.
14040 : : *
14041 : : * @return
14042 : : * 0 on success, negative errno value otherwise and rte_errno set.
14043 : : */
14044 : : int
14045 : 0 : mlx5_flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type,
14046 : : const struct rte_flow_attr *attr,
14047 : : const struct rte_flow_item items[],
14048 : : const struct rte_flow_action actions[],
14049 : : uint64_t item_flags, uint64_t action_flags, bool external,
14050 : : struct rte_flow_hw **flow, struct rte_flow_error *error)
14051 : : {
14052 : : int ret;
14053 : 0 : struct mlx5_hw_actions hw_act = { { NULL } };
14054 : : struct mlx5_flow_hw_action_params ap;
14055 : 0 : struct mlx5_flow_dv_matcher matcher = {
14056 : : .mask = {
14057 : : .size = sizeof(matcher.mask.buf),
14058 : : },
14059 : : };
14060 : : uint32_t tbl_type;
14061 : :
14062 : 0 : struct mlx5_flow_attr flow_attr = {
14063 : 0 : .port_id = dev->data->port_id,
14064 : 0 : .group = attr->group,
14065 : 0 : .priority = attr->priority,
14066 : : .rss_level = 0,
14067 : : .act_flags = action_flags,
14068 : : .tbl_type = 0,
14069 : : };
14070 : :
14071 [ # # ]: 0 : if (attr->transfer)
14072 : : tbl_type = MLX5DR_TABLE_TYPE_FDB;
14073 [ # # ]: 0 : else if (attr->egress)
14074 : : tbl_type = MLX5DR_TABLE_TYPE_NIC_TX;
14075 : : else
14076 : : tbl_type = MLX5DR_TABLE_TYPE_NIC_RX;
14077 : 0 : flow_attr.tbl_type = tbl_type;
14078 : :
14079 : : /* Allocate needed memory. */
14080 : 0 : ret = flow_hw_prepare(dev, actions, type, flow, error);
14081 [ # # ]: 0 : if (ret)
14082 : 0 : goto error;
14083 : :
14084 : : /* TODO TBD flow_hw_handle_tunnel_offload(). */
14085 : 0 : (*flow)->nt_rule = true;
14086 : 0 : (*flow)->nt2hws->matcher = &matcher;
14087 : 0 : ret = mlx5_flow_dv_translate_items_hws_impl(items, &flow_attr, &matcher.mask.buf,
14088 : : MLX5_SET_MATCHER_HS_M, NULL,
14089 : : NULL, true, error);
14090 : :
14091 [ # # ]: 0 : if (ret)
14092 : 0 : goto error;
14093 : :
14094 [ # # # # ]: 0 : if (item_flags & MLX5_FLOW_LAYER_ECPRI && !mlx5_flex_parser_ecpri_exist(dev))
14095 [ # # ]: 0 : if (mlx5_flex_parser_ecpri_alloc(dev)) {
14096 : 0 : rte_flow_error_set(error, EIO,
14097 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
14098 : : NULL,
14099 : : "failed to create Flex parser "
14100 : : "profile for ECPRI");
14101 : 0 : goto error;
14102 : : }
14103 : 0 : ret = flow_hw_register_matcher(dev, attr, items, external, *flow, &matcher, error);
14104 [ # # ]: 0 : if (ret) {
14105 [ # # ]: 0 : if (rte_errno == E2BIG)
14106 : 0 : rte_flow_error_set(error, E2BIG, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
14107 : : "flow pattern is too big");
14108 : 0 : goto error;
14109 : : }
14110 : :
14111 : : /*
14112 : : * ASO allocation – iterating on actions list to allocate missing resources.
14113 : : * In the future when validate function in hws will be added,
14114 : : * The output actions bit mask instead of
14115 : : * looping on the actions array twice.
14116 : : */
14117 : 0 : ret = flow_hw_allocate_actions(dev, action_flags, error);
14118 [ # # ]: 0 : if (ret)
14119 : 0 : goto error;
14120 : :
14121 : : /* Note: the actions should be saved in the sub-flow rule itself for reference. */
14122 : 0 : ret = flow_hw_translate_flow_actions(dev, attr, actions, *flow, &ap, &hw_act,
14123 : : item_flags, action_flags, external, error);
14124 [ # # ]: 0 : if (ret)
14125 : 0 : goto error;
14126 : :
14127 : : /*
14128 : : * If the flow is external (from application) OR device is started,
14129 : : * OR mreg discover, then apply immediately.
14130 : : */
14131 [ # # # # ]: 0 : if (external || dev->data->dev_started ||
14132 [ # # ]: 0 : (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
14133 [ # # ]: 0 : attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
14134 : 0 : ret = flow_hw_apply(items, hw_act.rule_acts, *flow, error);
14135 [ # # ]: 0 : if (ret)
14136 : 0 : goto error;
14137 : : }
14138 : : ret = 0;
14139 : 0 : error:
14140 : : /*
14141 : : * Release memory allocated.
14142 : : * Cannot use __flow_hw_actions_release(dev, &hw_act);
14143 : : * since it destroys the actions as well.
14144 : : */
14145 [ # # ]: 0 : if (hw_act.encap_decap)
14146 : 0 : mlx5_free(hw_act.encap_decap);
14147 [ # # ]: 0 : if (hw_act.push_remove)
14148 : 0 : mlx5_free(hw_act.push_remove);
14149 [ # # ]: 0 : if (hw_act.mhdr)
14150 : 0 : mlx5_free(hw_act.mhdr);
14151 : 0 : return ret;
14152 : : }
14153 : : #endif
14154 : :
14155 : : void
14156 : 0 : mlx5_flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow)
14157 : : {
14158 : : int ret;
14159 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14160 : :
14161 [ # # # # ]: 0 : if (!flow || !flow->nt2hws)
14162 : : return;
14163 : :
14164 [ # # ]: 0 : if (flow->nt2hws->nt_rule) {
14165 : 0 : ret = mlx5dr_bwc_rule_destroy(flow->nt2hws->nt_rule);
14166 [ # # ]: 0 : if (ret)
14167 : 0 : DRV_LOG(ERR, "bwc rule destroy failed");
14168 : 0 : flow->nt2hws->nt_rule = NULL;
14169 : : }
14170 [ # # ]: 0 : flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY;
14171 : : /* Notice this function does not handle shared/static actions. */
14172 : : hw_cmpl_flow_update_or_destroy(dev, flow, 0, NULL);
14173 : :
14174 : : /**
14175 : : * TODO: TBD - Release tunnel related memory allocations(mlx5_flow_tunnel_free)
14176 : : * – needed only if supporting tunnel offloads, notice update RX queue flags in SWS.
14177 : : */
14178 : :
14179 : : /**
14180 : : * Notice matcher destroy will take place when matcher's list is destroyed
14181 : : * , same as for DV.
14182 : : */
14183 [ # # ]: 0 : if (flow->nt2hws->flow_aux)
14184 : 0 : flow->nt2hws->flow_aux = NULL;
14185 [ # # ]: 0 : if (flow->nt2hws->rix_encap_decap) {
14186 : 0 : mlx5_flow_encap_decap_resource_release(dev, flow->nt2hws->rix_encap_decap);
14187 : 0 : flow->nt2hws->rix_encap_decap = 0;
14188 : : }
14189 [ # # ]: 0 : if (flow->nt2hws->modify_hdr) {
14190 : : MLX5_ASSERT(flow->nt2hws->modify_hdr->action);
14191 : 0 : mlx5_hlist_unregister(priv->sh->modify_cmds,
14192 : : &flow->nt2hws->modify_hdr->entry);
14193 : 0 : flow->nt2hws->modify_hdr = NULL;
14194 : : }
14195 [ # # ]: 0 : if (flow->nt2hws->matcher) {
14196 : 0 : flow_hw_unregister_matcher(dev, flow->nt2hws->matcher);
14197 : 0 : flow->nt2hws->matcher = NULL;
14198 : : }
14199 [ # # ]: 0 : if (flow->nt2hws->sample_release_ctx != NULL) {
14200 : 0 : mlx5_nta_sample_mirror_entry_release(dev, flow->nt2hws->sample_release_ctx);
14201 : 0 : flow->nt2hws->sample_release_ctx = NULL;
14202 : : }
14203 : : }
14204 : :
14205 : : #ifdef HAVE_MLX5_HWS_SUPPORT
14206 : : /**
14207 : : * Destroy a flow.
14208 : : *
14209 : : * @param[in] dev
14210 : : * Pointer to Ethernet device.
14211 : : * @param[in] type
14212 : : * Flow type.
14213 : : * @param[in] flow_addr
14214 : : * Address of flow to destroy.
14215 : : */
14216 : : void
14217 : 0 : mlx5_flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
14218 : : uintptr_t flow_addr)
14219 : : {
14220 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14221 : 0 : struct rte_flow_hw *flow = (struct rte_flow_hw *)flow_addr;
14222 : : struct mlx5_nta_rss_flow_head head = { .slh_first = flow };
14223 : :
14224 [ # # # # : 0 : if (!flow || !flow->nt2hws || flow->nt2hws->chaned_flow)
# # ]
14225 : : return;
14226 : 0 : mlx5_flow_nta_del_copy_action(dev, flow->nt2hws->rix_mreg_copy);
14227 [ # # ]: 0 : while (!SLIST_EMPTY(&head)) {
14228 : : flow = SLIST_FIRST(&head);
14229 : 0 : SLIST_REMOVE_HEAD(&head, nt2hws->next);
14230 : 0 : mlx5_flow_hw_destroy(dev, flow);
14231 : : /* Release flow memory by idx */
14232 : 0 : mlx5_ipool_free(priv->flows[type], flow->idx);
14233 : : }
14234 : : }
14235 : : #endif
14236 : :
14237 : : /**
14238 : : * Create a flow.
14239 : : *
14240 : : * @param[in] dev
14241 : : * Pointer to Ethernet device.
14242 : : * @param[in] type
14243 : : * Flow type.
14244 : : * @param[in] attr
14245 : : * Flow rule attributes.
14246 : : * @param[in] items
14247 : : * Pattern specification (list terminated by the END pattern item).
14248 : : * @param[in] actions
14249 : : * Associated actions (list terminated by the END action).
14250 : : * @param[in] external
14251 : : * This flow rule is created by request external to PMD.
14252 : : * @param[out] error
14253 : : * Perform verbose error reporting if not NULL.
14254 : : *
14255 : : * @return
14256 : : * A flow addr on success, 0 otherwise and rte_errno is set.
14257 : : */
14258 : 0 : static uintptr_t flow_hw_list_create(struct rte_eth_dev *dev,
14259 : : enum mlx5_flow_type type,
14260 : : const struct rte_flow_attr *attr,
14261 : : const struct rte_flow_item items[],
14262 : : const struct rte_flow_action actions[],
14263 : : bool external,
14264 : : struct rte_flow_error *error)
14265 : : {
14266 : : int ret;
14267 : : int split;
14268 : : int encap_idx;
14269 : 0 : uint32_t cpy_idx = 0;
14270 : 0 : int actions_n = 0;
14271 : 0 : struct rte_flow_hw *flow = NULL;
14272 : 0 : struct rte_flow_hw *prfx_flow = NULL;
14273 : 0 : const struct rte_flow_action *qrss = NULL;
14274 : 0 : const struct rte_flow_action *mark = NULL;
14275 : 0 : uint64_t item_flags = 0;
14276 : 0 : uint64_t action_flags = mlx5_flow_hw_action_flags_get(actions, &qrss, &mark,
14277 : : &encap_idx, &actions_n, error);
14278 : 0 : struct mlx5_flow_hw_split_resource resource = {
14279 : : .suffix = {
14280 : : .attr = attr,
14281 : : .items = items,
14282 : : .actions = actions,
14283 : : },
14284 : : };
14285 : 0 : struct rte_flow_error shadow_error = {0, };
14286 : 0 : const struct rte_flow_pattern_template_attr pattern_template_attr = {
14287 : : .relaxed_matching = 0,
14288 : 0 : .ingress = attr->ingress,
14289 : 0 : .egress = attr->egress,
14290 : 0 : .transfer = attr->transfer,
14291 : : };
14292 : :
14293 : : /* Validate application items only */
14294 : 0 : ret = __flow_hw_pattern_validate(dev, &pattern_template_attr, items,
14295 : : &item_flags, true, error);
14296 [ # # ]: 0 : if (ret < 0)
14297 : : return 0;
14298 : :
14299 : : RTE_SET_USED(encap_idx);
14300 [ # # ]: 0 : if (!error)
14301 : : error = &shadow_error;
14302 : 0 : split = mlx5_flow_nta_split_metadata(dev, attr, actions, qrss, action_flags,
14303 : : actions_n, external, &resource, error);
14304 [ # # ]: 0 : if (split < 0)
14305 : 0 : return split;
14306 : :
14307 : : /* Update the metadata copy table - MLX5_FLOW_MREG_CP_TABLE_GROUP */
14308 [ # # # # : 0 : if (((attr->ingress && attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP) ||
# # ]
14309 [ # # ]: 0 : attr->transfer) && external) {
14310 : 0 : ret = mlx5_flow_nta_update_copy_table(dev, &cpy_idx, mark,
14311 : : action_flags, error);
14312 [ # # ]: 0 : if (ret)
14313 : 0 : goto free;
14314 : : }
14315 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
14316 : 0 : flow = mlx5_nta_sample_flow_list_create(dev, type, attr, items, actions,
14317 : : item_flags, action_flags, error);
14318 [ # # ]: 0 : if (flow != NULL)
14319 : 0 : return (uintptr_t)flow;
14320 : 0 : goto free;
14321 : : }
14322 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_RSS) {
14323 : : const struct rte_flow_action_rss
14324 : 0 : *rss_conf = mlx5_flow_nta_locate_rss(dev, actions, error);
14325 : 0 : flow = mlx5_flow_nta_handle_rss(dev, attr, items, actions, rss_conf,
14326 : : item_flags, action_flags, external,
14327 : : type, error);
14328 [ # # ]: 0 : if (flow) {
14329 : 0 : flow->nt2hws->rix_mreg_copy = cpy_idx;
14330 : 0 : cpy_idx = 0;
14331 [ # # ]: 0 : if (!split)
14332 : 0 : return (uintptr_t)flow;
14333 : 0 : goto prefix_flow;
14334 : : }
14335 : 0 : goto free;
14336 : : }
14337 : : /* Create single flow. */
14338 : 0 : ret = mlx5_flow_hw_create_flow(dev, type, resource.suffix.attr, resource.suffix.items,
14339 : : resource.suffix.actions, item_flags, action_flags,
14340 : : external, &flow, error);
14341 [ # # ]: 0 : if (ret)
14342 : 0 : goto free;
14343 [ # # ]: 0 : if (flow) {
14344 : 0 : flow->nt2hws->rix_mreg_copy = cpy_idx;
14345 : 0 : cpy_idx = 0;
14346 [ # # ]: 0 : if (!split)
14347 : 0 : return (uintptr_t)flow;
14348 : : /* Fall Through to prefix flow creation. */
14349 : : }
14350 : 0 : prefix_flow:
14351 : 0 : ret = mlx5_flow_hw_create_flow(dev, type, attr, items, resource.prefix.actions,
14352 : : item_flags, action_flags, external, &prfx_flow, error);
14353 [ # # ]: 0 : if (ret)
14354 : 0 : goto free;
14355 [ # # ]: 0 : if (prfx_flow) {
14356 : 0 : prfx_flow->nt2hws->rix_mreg_copy = flow->nt2hws->rix_mreg_copy;
14357 : 0 : flow->nt2hws->chaned_flow = 1;
14358 : 0 : SLIST_INSERT_AFTER(prfx_flow, flow, nt2hws->next);
14359 : 0 : mlx5_flow_nta_split_resource_free(dev, &resource);
14360 : 0 : return (uintptr_t)prfx_flow;
14361 : : }
14362 : 0 : free:
14363 [ # # ]: 0 : if (prfx_flow)
14364 : 0 : mlx5_flow_hw_list_destroy(dev, type, (uintptr_t)prfx_flow);
14365 [ # # ]: 0 : if (flow)
14366 : 0 : mlx5_flow_hw_list_destroy(dev, type, (uintptr_t)flow);
14367 [ # # ]: 0 : if (cpy_idx)
14368 : 0 : mlx5_flow_nta_del_copy_action(dev, cpy_idx);
14369 [ # # ]: 0 : if (split > 0)
14370 : 0 : mlx5_flow_nta_split_resource_free(dev, &resource);
14371 : : return 0;
14372 : : }
14373 : :
14374 : : static void
14375 : 0 : mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
14376 : : struct mlx5_mirror_clone *clone)
14377 : : {
14378 [ # # # ]: 0 : switch (clone->type) {
14379 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
14380 : : case RTE_FLOW_ACTION_TYPE_QUEUE:
14381 : 0 : mlx5_hrxq_release(dev,
14382 : 0 : ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
14383 : 0 : break;
14384 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
14385 : 0 : flow_hw_jump_release(dev, clone->action_ctx);
14386 : : break;
14387 : : case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14388 : : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14389 : : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14390 : : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14391 : : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14392 : : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14393 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14394 : : default:
14395 : : break;
14396 : : }
14397 : 0 : }
14398 : :
14399 : : void
14400 [ # # ]: 0 : mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
14401 : : {
14402 : : uint32_t i;
14403 : :
14404 : : mlx5_indirect_list_remove_entry(&mirror->indirect);
14405 [ # # ]: 0 : for (i = 0; i < mirror->clones_num; i++)
14406 : 0 : mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
14407 [ # # ]: 0 : if (mirror->mirror_action)
14408 : 0 : mlx5dr_action_destroy(mirror->mirror_action);
14409 : 0 : mlx5_free(mirror);
14410 : 0 : }
14411 : :
14412 : : static __rte_always_inline bool
14413 : : mlx5_mirror_terminal_action(const struct rte_flow_action *action)
14414 : : {
14415 : 0 : switch (action->type) {
14416 : : case RTE_FLOW_ACTION_TYPE_JUMP:
14417 : : case RTE_FLOW_ACTION_TYPE_RSS:
14418 : : case RTE_FLOW_ACTION_TYPE_QUEUE:
14419 : : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14420 : : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14421 : : case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14422 : : case RTE_FLOW_ACTION_TYPE_DROP:
14423 : : return true;
14424 : : default:
14425 : : break;
14426 : : }
14427 : : return false;
14428 : : }
14429 : :
14430 : : static bool
14431 : 0 : mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
14432 : : const struct rte_flow_attr *flow_attr,
14433 : : const struct rte_flow_action *action)
14434 : : {
14435 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14436 : : const struct rte_flow_action_ethdev *port = NULL;
14437 [ # # # # ]: 0 : bool is_proxy = MLX5_HW_PORT_IS_PROXY(priv);
14438 : :
14439 [ # # ]: 0 : if (!action)
14440 : : return false;
14441 [ # # # # : 0 : switch (action->type) {
# ]
14442 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
14443 : : case RTE_FLOW_ACTION_TYPE_RSS:
14444 [ # # ]: 0 : if (flow_attr->transfer)
14445 : : return false;
14446 : : break;
14447 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14448 [ # # # # ]: 0 : if (!is_proxy || !flow_attr->transfer)
14449 : : return false;
14450 : 0 : port = action->conf;
14451 [ # # # # ]: 0 : if (!port || port->port_id != MLX5_REPRESENTED_PORT_ESW_MGR)
14452 : : return false;
14453 : : break;
14454 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14455 : : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14456 : : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14457 : : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14458 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14459 [ # # # # ]: 0 : if (!is_proxy || !flow_attr->transfer)
14460 : : return false;
14461 [ # # ]: 0 : if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
14462 [ # # ]: 0 : action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
14463 : : return false;
14464 : : break;
14465 : : case RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX:
14466 : : case RTE_FLOW_ACTION_TYPE_JUMP:
14467 : : case RTE_FLOW_ACTION_TYPE_DROP:
14468 : : break;
14469 : : default:
14470 : : return false;
14471 : : }
14472 : 0 : return true;
14473 : : }
14474 : :
14475 : : /**
14476 : : * Valid mirror actions list includes one or two SAMPLE actions
14477 : : * followed by JUMP.
14478 : : *
14479 : : * @return
14480 : : * Number of mirrors *action* list was valid.
14481 : : * -EINVAL otherwise.
14482 : : */
14483 : : static int
14484 : 0 : mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
14485 : : const struct rte_flow_attr *flow_attr,
14486 : : const struct rte_flow_action *actions)
14487 : : {
14488 [ # # ]: 0 : if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14489 : : int i = 1;
14490 : : bool valid;
14491 : 0 : const struct rte_flow_action_sample *sample = actions[0].conf;
14492 : 0 : valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
14493 : 0 : sample->actions);
14494 [ # # ]: 0 : if (!valid)
14495 : : return -EINVAL;
14496 [ # # ]: 0 : if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14497 : : i = 2;
14498 : 0 : sample = actions[1].conf;
14499 : 0 : valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
14500 : 0 : sample->actions);
14501 [ # # ]: 0 : if (!valid)
14502 : : return -EINVAL;
14503 : : }
14504 [ # # ]: 0 : return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
14505 : : }
14506 : : return -EINVAL;
14507 : : }
14508 : :
14509 : : static int
14510 : 0 : mirror_format_tir(struct rte_eth_dev *dev,
14511 : : struct mlx5_mirror_clone *clone,
14512 : : const struct mlx5_flow_template_table_cfg *table_cfg,
14513 : : const struct rte_flow_action *action,
14514 : : struct mlx5dr_action_dest_attr *dest_attr,
14515 : : struct rte_flow_error *error)
14516 : : {
14517 : : uint32_t hws_flags;
14518 : : enum mlx5dr_table_type table_type;
14519 : : struct mlx5_hrxq *tir_ctx;
14520 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14521 : : bool unified_fdb = is_unified_fdb(priv);
14522 : :
14523 : 0 : table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr, table_cfg->attr.specialize,
14524 : : unified_fdb);
14525 : 0 : hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
14526 : 0 : tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
14527 [ # # ]: 0 : if (!tir_ctx)
14528 : 0 : return rte_flow_error_set(error, EINVAL,
14529 : : RTE_FLOW_ERROR_TYPE_ACTION,
14530 : : action, "failed to create QUEUE action for mirror clone");
14531 : 0 : dest_attr->dest = tir_ctx->action;
14532 : 0 : clone->action_ctx = tir_ctx;
14533 : 0 : return 0;
14534 : : }
14535 : :
14536 : : static int
14537 : 0 : mirror_format_jump(struct rte_eth_dev *dev,
14538 : : struct mlx5_mirror_clone *clone,
14539 : : const struct mlx5_flow_template_table_cfg *table_cfg,
14540 : : const struct rte_flow_action *action,
14541 : : struct mlx5dr_action_dest_attr *dest_attr,
14542 : : struct rte_flow_error *error)
14543 : : {
14544 : 0 : const struct rte_flow_action_jump *jump_conf = action->conf;
14545 : 0 : struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
14546 : : (dev, table_cfg,
14547 : 0 : jump_conf->group, error);
14548 : :
14549 [ # # ]: 0 : if (!jump)
14550 : 0 : return rte_flow_error_set(error, EINVAL,
14551 : : RTE_FLOW_ERROR_TYPE_ACTION,
14552 : : action, "failed to create JUMP action for mirror clone");
14553 : 0 : dest_attr->dest = jump->hws_action;
14554 : 0 : clone->action_ctx = jump;
14555 : 0 : return 0;
14556 : : }
14557 : :
14558 : : static int
14559 : : mirror_format_port(struct rte_eth_dev *dev,
14560 : : const struct rte_flow_action *action,
14561 : : struct mlx5dr_action_dest_attr *dest_attr,
14562 : : struct rte_flow_error __rte_unused *error)
14563 : : {
14564 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14565 : 0 : const struct rte_flow_action_ethdev *port_action = action->conf;
14566 : :
14567 : 0 : dest_attr->dest = priv->hw_vport[port_action->port_id];
14568 : : return 0;
14569 : : }
14570 : :
14571 : : static int
14572 : 0 : hw_mirror_clone_reformat(const struct rte_flow_action *actions,
14573 : : struct mlx5dr_action_dest_attr *dest_attr,
14574 : : enum mlx5dr_action_type *action_type,
14575 : : uint8_t *reformat_buf, bool decap)
14576 : : {
14577 : : int ret;
14578 : : const struct rte_flow_item *encap_item = NULL;
14579 : : const struct rte_flow_action_raw_encap *encap_conf = NULL;
14580 : : typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
14581 : :
14582 [ # # # # ]: 0 : switch (actions[0].type) {
14583 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14584 : 0 : encap_conf = actions[0].conf;
14585 : 0 : break;
14586 : 0 : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14587 : 0 : encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
14588 : : actions);
14589 : 0 : break;
14590 : 0 : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14591 : 0 : encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
14592 : : actions);
14593 : 0 : break;
14594 : : default:
14595 : : return -EINVAL;
14596 : : }
14597 : 0 : *action_type = decap ?
14598 [ # # ]: 0 : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
14599 : : MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
14600 [ # # ]: 0 : if (encap_item) {
14601 : 0 : ret = mlx5_flow_dv_convert_encap_data(encap_item, reformat_buf,
14602 : : &reformat->reformat_data_sz, NULL);
14603 [ # # ]: 0 : if (ret)
14604 : : return -EINVAL;
14605 : 0 : reformat->reformat_data = reformat_buf;
14606 : : } else {
14607 : 0 : reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
14608 : 0 : reformat->reformat_data_sz = encap_conf->size;
14609 : : }
14610 : : return 0;
14611 : : }
14612 : :
14613 : : static int
14614 : 0 : hw_mirror_format_clone(struct rte_eth_dev *dev,
14615 : : struct mlx5_mirror_clone *clone,
14616 : : const struct mlx5_flow_template_table_cfg *table_cfg,
14617 : : const struct rte_flow_action *actions,
14618 : : struct mlx5dr_action_dest_attr *dest_attr,
14619 : : uint8_t *reformat_buf,
14620 : : enum mlx5dr_table_type table_type,
14621 : : struct rte_flow_error *error)
14622 : : {
14623 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14624 : : int ret;
14625 : : uint32_t i;
14626 : : bool decap_seen = false;
14627 : :
14628 [ # # ]: 0 : for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
14629 : 0 : dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
14630 [ # # # # : 0 : switch (actions[i].type) {
# # # # ]
14631 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
14632 : : case RTE_FLOW_ACTION_TYPE_RSS:
14633 : 0 : ret = mirror_format_tir(dev, clone, table_cfg,
14634 : : &actions[i], dest_attr, error);
14635 [ # # ]: 0 : if (ret)
14636 : 0 : return ret;
14637 : : break;
14638 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
14639 : : ret = mirror_format_port(dev, &actions[i],
14640 : : dest_attr, error);
14641 : : if (ret)
14642 : : return ret;
14643 : : break;
14644 : 0 : case RTE_FLOW_ACTION_TYPE_JUMP:
14645 : 0 : ret = mirror_format_jump(dev, clone, table_cfg,
14646 : : &actions[i], dest_attr, error);
14647 [ # # ]: 0 : if (ret)
14648 : 0 : return ret;
14649 : : break;
14650 : 0 : case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
14651 : 0 : dest_attr->dest = mlx5_hws_global_action_def_miss_get(priv,
14652 : : table_type,
14653 : : false);
14654 [ # # ]: 0 : if (dest_attr->dest == NULL)
14655 : 0 : return rte_flow_error_set(error, ENOMEM,
14656 : : RTE_FLOW_ERROR_TYPE_STATE, NULL,
14657 : : "failed to allocate port representor action");
14658 : : break;
14659 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14660 : : decap_seen = true;
14661 : 0 : break;
14662 : 0 : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14663 : : case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
14664 : : case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
14665 : 0 : ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
14666 : : &dest_attr->action_type[i],
14667 : : reformat_buf, decap_seen);
14668 [ # # ]: 0 : if (ret < 0)
14669 : 0 : return rte_flow_error_set(error, EINVAL,
14670 : : RTE_FLOW_ERROR_TYPE_ACTION,
14671 : : &actions[i],
14672 : : "failed to create reformat action");
14673 : : break;
14674 : : case RTE_FLOW_ACTION_TYPE_DROP:
14675 : : break;
14676 : 0 : default:
14677 : 0 : return rte_flow_error_set(error, EINVAL,
14678 : : RTE_FLOW_ERROR_TYPE_ACTION,
14679 : : &actions[i], "unsupported sample action");
14680 : : }
14681 : 0 : clone->type = actions->type;
14682 : : }
14683 : 0 : dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
14684 : 0 : return 0;
14685 : : }
14686 : :
14687 : : struct mlx5_mirror *
14688 : 0 : mlx5_hw_create_mirror(struct rte_eth_dev *dev,
14689 : : const struct mlx5_flow_template_table_cfg *table_cfg,
14690 : : const struct rte_flow_action *actions,
14691 : : struct rte_flow_error *error)
14692 : : {
14693 : : uint32_t hws_flags;
14694 : : int ret = 0, i, clones_num;
14695 : : struct mlx5_mirror *mirror;
14696 : : enum mlx5dr_table_type table_type;
14697 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
14698 : : bool unified_fdb = is_unified_fdb(priv);
14699 [ # # ]: 0 : const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
14700 : : uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
14701 : : struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
14702 : : enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
14703 : : [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
14704 : :
14705 : : memset(mirror_attr, 0, sizeof(mirror_attr));
14706 : : memset(array_action_types, 0, sizeof(array_action_types));
14707 : 0 : table_type = get_mlx5dr_table_type(flow_attr, table_cfg->attr.specialize, unified_fdb);
14708 : 0 : hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
14709 : 0 : clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
14710 : : actions);
14711 [ # # ]: 0 : if (clones_num < 0) {
14712 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14713 : : actions, "Invalid mirror list format");
14714 : 0 : return NULL;
14715 : : }
14716 : 0 : mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
14717 : : 0, SOCKET_ID_ANY);
14718 [ # # ]: 0 : if (!mirror) {
14719 : 0 : rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
14720 : : actions, "Failed to allocate mirror context");
14721 : 0 : return NULL;
14722 : : }
14723 : :
14724 : 0 : mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
14725 : 0 : mirror->clones_num = clones_num;
14726 [ # # ]: 0 : for (i = 0; i < clones_num; i++) {
14727 : : const struct rte_flow_action *clone_actions;
14728 : :
14729 : 0 : mirror_attr[i].action_type = array_action_types[i];
14730 [ # # ]: 0 : if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
14731 : 0 : const struct rte_flow_action_sample *sample = actions[i].conf;
14732 : :
14733 : 0 : clone_actions = sample->actions;
14734 : : } else {
14735 : : clone_actions = &actions[i];
14736 : : }
14737 : 0 : ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
14738 : : clone_actions, &mirror_attr[i],
14739 : 0 : reformat_buf[i], table_type, error);
14740 : :
14741 [ # # ]: 0 : if (ret)
14742 : 0 : goto error;
14743 : : }
14744 : 0 : hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
14745 : 0 : mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
14746 : : clones_num,
14747 : : mirror_attr,
14748 : : hws_flags);
14749 [ # # ]: 0 : if (!mirror->mirror_action) {
14750 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14751 : : actions, "Failed to create HWS mirror action");
14752 : 0 : goto error;
14753 : : }
14754 : : return mirror;
14755 : 0 : error:
14756 : 0 : mlx5_hw_mirror_destroy(dev, mirror);
14757 : 0 : return NULL;
14758 : : }
14759 : :
14760 : : static struct rte_flow_action_list_handle *
14761 : 0 : mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
14762 : : const struct mlx5_flow_template_table_cfg *table_cfg,
14763 : : const struct rte_flow_action *actions,
14764 : : struct rte_flow_error *error)
14765 : : {
14766 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14767 : 0 : struct mlx5_mirror *mirror = mlx5_hw_create_mirror(dev, table_cfg, actions, error);
14768 : :
14769 [ # # ]: 0 : if (mirror)
14770 [ # # ]: 0 : mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
14771 : 0 : return (struct rte_flow_action_list_handle *)mirror;
14772 : : }
14773 : :
14774 : : void
14775 : 0 : mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
14776 : : struct mlx5_indirect_list *ptr)
14777 : : {
14778 : : struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
14779 : :
14780 : : switch (obj->legacy_type) {
14781 : : case RTE_FLOW_ACTION_TYPE_METER_MARK:
14782 : : break; /* ASO meters were released in mlx5_flow_meter_flush() */
14783 : : default:
14784 : : break;
14785 : : }
14786 : 0 : mlx5_free(obj);
14787 : 0 : }
14788 : :
14789 : : static struct rte_flow_action_list_handle *
14790 : 0 : mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
14791 : : const struct rte_flow_op_attr *attr,
14792 : : const struct rte_flow_indir_action_conf *conf,
14793 : : const struct rte_flow_action *actions,
14794 : : void *user_data, struct rte_flow_error *error)
14795 : : {
14796 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14797 : 0 : struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
14798 : : sizeof(*indlst_obj),
14799 : : 0, SOCKET_ID_ANY);
14800 : :
14801 [ # # ]: 0 : if (!indlst_obj)
14802 : : return NULL;
14803 : 0 : indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
14804 : : actions, user_data,
14805 : : error);
14806 [ # # ]: 0 : if (!indlst_obj->handle) {
14807 : 0 : mlx5_free(indlst_obj);
14808 : 0 : return NULL;
14809 : : }
14810 : 0 : indlst_obj->legacy_type = actions[0].type;
14811 : 0 : indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
14812 [ # # ]: 0 : mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
14813 : 0 : return (struct rte_flow_action_list_handle *)indlst_obj;
14814 : : }
14815 : :
14816 : : static __rte_always_inline enum mlx5_indirect_list_type
14817 : : flow_hw_inlist_type_get(const struct rte_flow_action *actions)
14818 : : {
14819 [ # # # # ]: 0 : switch (actions[0].type) {
14820 : : case RTE_FLOW_ACTION_TYPE_SAMPLE:
14821 : : return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
14822 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
14823 : 0 : return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
14824 : 0 : MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
14825 : : MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
14826 : : case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
14827 : : case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
14828 : : return MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
14829 : : default:
14830 : : break;
14831 : : }
14832 : : return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
14833 : : }
14834 : :
14835 : : static struct rte_flow_action_list_handle*
14836 : 0 : mlx5_hw_decap_encap_handle_create(struct rte_eth_dev *dev,
14837 : : const struct mlx5_flow_template_table_cfg *table_cfg,
14838 : : const struct rte_flow_action *actions,
14839 : : struct rte_flow_error *error)
14840 : : {
14841 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14842 : : const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
14843 : : const struct rte_flow_action *encap = NULL;
14844 : : const struct rte_flow_action *decap = NULL;
14845 : 0 : struct rte_flow_indir_action_conf indirect_conf = {
14846 : 0 : .ingress = flow_attr->ingress,
14847 : 0 : .egress = flow_attr->egress,
14848 : 0 : .transfer = flow_attr->transfer,
14849 : : };
14850 : : struct mlx5_hw_encap_decap_action *handle;
14851 : : uint64_t action_flags = 0;
14852 : :
14853 : : /*
14854 : : * Allow
14855 : : * 1. raw_decap / raw_encap / end
14856 : : * 2. raw_encap / end
14857 : : * 3. raw_decap / end
14858 : : */
14859 [ # # ]: 0 : while (actions->type != RTE_FLOW_ACTION_TYPE_END) {
14860 [ # # ]: 0 : if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) {
14861 [ # # ]: 0 : if (action_flags) {
14862 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14863 : : actions, "Invalid indirect action list sequence");
14864 : 0 : return NULL;
14865 : : }
14866 : : action_flags |= MLX5_FLOW_ACTION_DECAP;
14867 : : decap = actions;
14868 [ # # ]: 0 : } else if (actions->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
14869 [ # # ]: 0 : if (action_flags & MLX5_FLOW_ACTION_ENCAP) {
14870 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14871 : : actions, "Invalid indirect action list sequence");
14872 : 0 : return NULL;
14873 : : }
14874 : 0 : action_flags |= MLX5_FLOW_ACTION_ENCAP;
14875 : : encap = actions;
14876 : : } else {
14877 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14878 : : actions, "Invalid indirect action type in list");
14879 : 0 : return NULL;
14880 : : }
14881 : 0 : actions++;
14882 : : }
14883 [ # # ]: 0 : if (!decap && !encap) {
14884 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14885 : : actions, "Invalid indirect action combinations");
14886 : 0 : return NULL;
14887 : : }
14888 : 0 : handle = mlx5_reformat_action_create(dev, &indirect_conf, encap, decap, error);
14889 [ # # ]: 0 : if (!handle) {
14890 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14891 : : actions, "Failed to create HWS decap_encap action");
14892 : 0 : return NULL;
14893 : : }
14894 : 0 : handle->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT;
14895 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->indirect_list_head, &handle->indirect, entry);
14896 : 0 : return (struct rte_flow_action_list_handle *)handle;
14897 : : }
14898 : :
14899 : : static struct rte_flow_action_list_handle *
14900 [ # # ]: 0 : flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
14901 : : const struct rte_flow_op_attr *attr,
14902 : : const struct rte_flow_indir_action_conf *conf,
14903 : : const struct rte_flow_action *actions,
14904 : : void *user_data,
14905 : : struct rte_flow_error *error)
14906 : : {
14907 : : struct mlx5_hw_q_job *job = NULL;
14908 : : bool push = flow_hw_action_push(attr);
14909 : : enum mlx5_indirect_list_type list_type;
14910 : : struct rte_flow_action_list_handle *handle;
14911 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
14912 : 0 : struct mlx5_flow_template_table_cfg table_cfg = {
14913 : : .external = true,
14914 : : .attr = {
14915 : : .flow_attr = {
14916 : 0 : .ingress = conf->ingress,
14917 : 0 : .egress = conf->egress,
14918 : 0 : .transfer = conf->transfer
14919 : : }
14920 : : }
14921 : : };
14922 : :
14923 [ # # ]: 0 : if (!mlx5_hw_ctx_validate(dev, error))
14924 : : return NULL;
14925 [ # # ]: 0 : if (!actions) {
14926 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14927 : : NULL, "No action list");
14928 : 0 : return NULL;
14929 : : }
14930 : : list_type = flow_hw_inlist_type_get(actions);
14931 [ # # ]: 0 : if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
14932 : : /*
14933 : : * Legacy indirect actions already have
14934 : : * async resources management. No need to do it twice.
14935 : : */
14936 : 0 : handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
14937 : : actions, user_data, error);
14938 : 0 : goto end;
14939 : : }
14940 [ # # ]: 0 : if (attr) {
14941 : : job = flow_hw_action_job_init(priv, queue, NULL, user_data,
14942 : : NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
14943 : : MLX5_HW_INDIRECT_TYPE_LIST, error);
14944 : : if (!job)
14945 : 0 : return NULL;
14946 : : }
14947 [ # # # ]: 0 : switch (list_type) {
14948 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
14949 : : /*
14950 : : * Mirror action is only supported in HWS group. Setting group to
14951 : : * non-zero will ensure that the action resources are allocated correctly.
14952 : : */
14953 : 0 : table_cfg.attr.flow_attr.group = 1;
14954 : 0 : handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
14955 : : actions, error);
14956 : 0 : break;
14957 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
14958 : 0 : handle = mlx5_hw_decap_encap_handle_create(dev, &table_cfg,
14959 : : actions, error);
14960 : 0 : break;
14961 : 0 : default:
14962 : : handle = NULL;
14963 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
14964 : : actions, "Invalid list");
14965 : : }
14966 [ # # ]: 0 : if (job) {
14967 : 0 : job->action = handle;
14968 [ # # ]: 0 : flow_hw_action_finalize(dev, queue, job, push, false,
14969 : : handle != NULL);
14970 : : }
14971 : 0 : end:
14972 : : return handle;
14973 : : }
14974 : :
14975 : : static struct rte_flow_action_list_handle *
14976 : 0 : flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
14977 : : const struct rte_flow_indir_action_conf *conf,
14978 : : const struct rte_flow_action *actions,
14979 : : struct rte_flow_error *error)
14980 : : {
14981 : 0 : return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
14982 : : NULL, conf, actions,
14983 : : NULL, error);
14984 : : }
14985 : :
14986 : : static int
14987 [ # # ]: 0 : flow_hw_async_action_list_handle_destroy
14988 : : (struct rte_eth_dev *dev, uint32_t queue,
14989 : : const struct rte_flow_op_attr *attr,
14990 : : struct rte_flow_action_list_handle *handle,
14991 : : void *user_data, struct rte_flow_error *error)
14992 : : {
14993 : : int ret = 0;
14994 : : struct mlx5_hw_q_job *job = NULL;
14995 : : bool push = flow_hw_action_push(attr);
14996 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
14997 : : enum mlx5_indirect_list_type type =
14998 : : mlx5_get_indirect_list_type((void *)handle);
14999 : :
15000 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
15001 : : struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
15002 : :
15003 : 0 : ret = flow_hw_action_handle_destroy(dev, queue, attr,
15004 : : legacy->handle,
15005 : : user_data, error);
15006 : : mlx5_indirect_list_remove_entry(&legacy->indirect);
15007 : 0 : mlx5_free(legacy);
15008 : 0 : goto end;
15009 : : }
15010 [ # # ]: 0 : if (attr) {
15011 : : job = flow_hw_action_job_init(priv, queue, NULL, user_data,
15012 : : NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
15013 : : MLX5_HW_INDIRECT_TYPE_LIST, error);
15014 : : if (!job)
15015 : 0 : return rte_errno;
15016 : : }
15017 [ # # # ]: 0 : switch (type) {
15018 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
15019 : 0 : mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
15020 : 0 : break;
15021 : 0 : case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT:
15022 [ # # ]: 0 : LIST_REMOVE(&((struct mlx5_hw_encap_decap_action *)handle)->indirect,
15023 : : entry);
15024 : 0 : mlx5_reformat_action_destroy(dev, handle, error);
15025 : 0 : break;
15026 : 0 : default:
15027 : 0 : ret = rte_flow_error_set(error, EINVAL,
15028 : : RTE_FLOW_ERROR_TYPE_ACTION,
15029 : : NULL, "Invalid indirect list handle");
15030 : : }
15031 [ # # ]: 0 : if (job) {
15032 : : flow_hw_action_finalize(dev, queue, job, push, false, true);
15033 : : }
15034 : 0 : end:
15035 : : return ret;
15036 : : }
15037 : :
15038 : : static int
15039 : 0 : flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
15040 : : struct rte_flow_action_list_handle *handle,
15041 : : struct rte_flow_error *error)
15042 : : {
15043 : 0 : return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
15044 : : NULL, handle, NULL,
15045 : : error);
15046 : : }
15047 : :
15048 : : static int
15049 [ # # ]: 0 : flow_hw_async_action_list_handle_query_update
15050 : : (struct rte_eth_dev *dev, uint32_t queue_id,
15051 : : const struct rte_flow_op_attr *attr,
15052 : : const struct rte_flow_action_list_handle *handle,
15053 : : const void **update, void **query,
15054 : : enum rte_flow_query_update_mode mode,
15055 : : void *user_data, struct rte_flow_error *error)
15056 : : {
15057 : : enum mlx5_indirect_list_type type =
15058 : : mlx5_get_indirect_list_type((const void *)handle);
15059 : :
15060 [ # # ]: 0 : if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
15061 : : struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
15062 : :
15063 [ # # ]: 0 : if (update && query)
15064 : 0 : return flow_hw_async_action_handle_query_update
15065 : : (dev, queue_id, attr, legacy->handle,
15066 : : update, query, mode, user_data, error);
15067 [ # # # # ]: 0 : else if (update && update[0])
15068 : 0 : return flow_hw_action_handle_update(dev, queue_id, attr,
15069 : : legacy->handle, update[0],
15070 : : user_data, error);
15071 [ # # # # ]: 0 : else if (query && query[0])
15072 : 0 : return flow_hw_action_handle_query(dev, queue_id, attr,
15073 : 0 : legacy->handle, query[0],
15074 : : user_data, error);
15075 : : else
15076 : 0 : return rte_flow_error_set(error, EINVAL,
15077 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15078 : : NULL, "invalid legacy handle query_update parameters");
15079 : : }
15080 : : return -ENOTSUP;
15081 : : }
15082 : :
15083 : : static int
15084 : 0 : flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
15085 : : const struct rte_flow_action_list_handle *handle,
15086 : : const void **update, void **query,
15087 : : enum rte_flow_query_update_mode mode,
15088 : : struct rte_flow_error *error)
15089 : : {
15090 : 0 : return flow_hw_async_action_list_handle_query_update
15091 : : (dev, MLX5_HW_INV_QUEUE, NULL, handle,
15092 : : update, query, mode, NULL, error);
15093 : : }
15094 : :
15095 : : static int
15096 : 0 : flow_hw_calc_table_hash(struct rte_eth_dev *dev,
15097 : : const struct rte_flow_template_table *table,
15098 : : const struct rte_flow_item pattern[],
15099 : : uint8_t pattern_template_index,
15100 : : uint32_t *hash, struct rte_flow_error *error)
15101 : : {
15102 : : const struct rte_flow_item *items;
15103 : : struct mlx5_flow_hw_pattern_params pp;
15104 : : int res;
15105 : :
15106 : 0 : items = flow_hw_get_rule_items(dev, table, pattern,
15107 : : pattern_template_index,
15108 : : &pp);
15109 : 0 : res = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,
15110 : : pattern_template_index,
15111 : : MLX5DR_RULE_HASH_CALC_MODE_RAW,
15112 : : hash);
15113 [ # # ]: 0 : if (res)
15114 : 0 : return rte_flow_error_set(error, res,
15115 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15116 : : NULL,
15117 : : "hash could not be calculated");
15118 : : return 0;
15119 : : }
15120 : :
15121 : : static int
15122 : 0 : flow_hw_calc_encap_hash(struct rte_eth_dev *dev,
15123 : : const struct rte_flow_item pattern[],
15124 : : enum rte_flow_encap_hash_field dest_field,
15125 : : uint8_t *hash,
15126 : : struct rte_flow_error *error)
15127 : : {
15128 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
15129 : : struct mlx5dr_crc_encap_entropy_hash_fields data;
15130 : 0 : enum mlx5dr_crc_encap_entropy_hash_size res_size =
15131 : : dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT ?
15132 : 0 : MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_16 :
15133 : : MLX5DR_CRC_ENCAP_ENTROPY_HASH_SIZE_8;
15134 : : int res;
15135 : :
15136 : : memset(&data, 0, sizeof(struct mlx5dr_crc_encap_entropy_hash_fields));
15137 : :
15138 [ # # ]: 0 : for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
15139 [ # # # # : 0 : switch (pattern->type) {
# # # # ]
15140 : 0 : case RTE_FLOW_ITEM_TYPE_IPV4:
15141 : 0 : data.dst.ipv4_addr =
15142 : 0 : ((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.dst_addr;
15143 : 0 : data.src.ipv4_addr =
15144 : 0 : ((const struct rte_flow_item_ipv4 *)(pattern->spec))->hdr.src_addr;
15145 : 0 : data.next_protocol = ((const struct rte_flow_item_ipv4 *)
15146 : 0 : (pattern->spec))->hdr.next_proto_id;
15147 : 0 : break;
15148 : 0 : case RTE_FLOW_ITEM_TYPE_IPV6:
15149 : : memcpy(data.dst.ipv6_addr,
15150 : 0 : &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.dst_addr,
15151 : : sizeof(data.dst.ipv6_addr));
15152 : : memcpy(data.src.ipv6_addr,
15153 : : &((const struct rte_flow_item_ipv6 *)(pattern->spec))->hdr.src_addr,
15154 : : sizeof(data.src.ipv6_addr));
15155 : 0 : data.next_protocol = ((const struct rte_flow_item_ipv6 *)
15156 : 0 : (pattern->spec))->hdr.proto;
15157 : 0 : break;
15158 : 0 : case RTE_FLOW_ITEM_TYPE_UDP:
15159 : 0 : data.next_protocol = IPPROTO_UDP;
15160 : 0 : data.dst_port =
15161 : 0 : ((const struct rte_flow_item_udp *)(pattern->spec))->hdr.dst_port;
15162 : 0 : data.src_port =
15163 : 0 : ((const struct rte_flow_item_udp *)(pattern->spec))->hdr.src_port;
15164 : 0 : break;
15165 : 0 : case RTE_FLOW_ITEM_TYPE_TCP:
15166 : 0 : data.next_protocol = IPPROTO_TCP;
15167 : 0 : data.dst_port =
15168 : 0 : ((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.dst_port;
15169 : 0 : data.src_port =
15170 : 0 : ((const struct rte_flow_item_tcp *)(pattern->spec))->hdr.src_port;
15171 : 0 : break;
15172 : 0 : case RTE_FLOW_ITEM_TYPE_ICMP:
15173 : 0 : data.next_protocol = IPPROTO_ICMP;
15174 : 0 : break;
15175 : 0 : case RTE_FLOW_ITEM_TYPE_ICMP6:
15176 : 0 : data.next_protocol = IPPROTO_ICMPV6;
15177 : 0 : break;
15178 : 0 : case RTE_FLOW_ITEM_TYPE_GRE:
15179 : 0 : data.next_protocol = IPPROTO_GRE;
15180 : 0 : break;
15181 : : default:
15182 : : break;
15183 : : }
15184 : : }
15185 : 0 : res = mlx5dr_crc_encap_entropy_hash_calc(priv->dr_ctx, &data, hash, res_size);
15186 [ # # ]: 0 : if (res)
15187 : 0 : return rte_flow_error_set(error, res,
15188 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15189 : : NULL, "error while calculating encap hash");
15190 : : return 0;
15191 : : }
15192 : :
15193 : : static int
15194 : 0 : flow_hw_table_resize_multi_pattern_actions(struct rte_eth_dev *dev,
15195 : : struct rte_flow_template_table *table,
15196 : : uint32_t nb_flows,
15197 : : struct rte_flow_error *error)
15198 : : {
15199 : 0 : struct mlx5_multi_pattern_segment *segment = table->mpctx.segments;
15200 : : uint32_t bulk_size;
15201 : : int i, ret;
15202 : :
15203 : : /**
15204 : : * Segment always allocates Modify Header Argument Objects number in
15205 : : * powers of 2.
15206 : : * On resize, PMD adds minimal required argument objects number.
15207 : : * For example, if table size was 10, it allocated 16 argument objects.
15208 : : * Resize to 15 will not add new objects.
15209 : : */
15210 : 0 : for (i = 1;
15211 [ # # # # ]: 0 : i < MLX5_MAX_TABLE_RESIZE_NUM && segment->capacity;
15212 : 0 : i++, segment++) {
15213 : : /* keep the devtools/checkpatches.sh happy */
15214 : : }
15215 [ # # ]: 0 : if (i == MLX5_MAX_TABLE_RESIZE_NUM)
15216 : 0 : return rte_flow_error_set(error, EINVAL,
15217 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15218 : : table, "too many resizes");
15219 [ # # ]: 0 : if (segment->head_index - 1 >= nb_flows)
15220 : : return 0;
15221 [ # # ]: 0 : bulk_size = rte_align32pow2(nb_flows - segment->head_index + 1);
15222 : 0 : ret = mlx5_tbl_multi_pattern_process(dev, table, segment,
15223 : : rte_log2_u32(bulk_size),
15224 : : error);
15225 [ # # ]: 0 : if (ret)
15226 : 0 : return rte_flow_error_set(error, EINVAL,
15227 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15228 : : table, "too many resizes");
15229 : : return i;
15230 : : }
15231 : :
15232 : : static int
15233 : 0 : flow_hw_table_resize(struct rte_eth_dev *dev,
15234 : : struct rte_flow_template_table *table,
15235 : : uint32_t nb_flows,
15236 : : struct rte_flow_error *error)
15237 : : {
15238 : : struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
15239 : : struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
15240 : 0 : struct mlx5dr_matcher_attr matcher_attr = table->matcher_attr;
15241 : 0 : struct mlx5dr_action_jump_to_matcher_attr jump_attr = {
15242 : : .type = MLX5DR_ACTION_JUMP_TO_MATCHER_BY_INDEX,
15243 : : .matcher = NULL,
15244 : : };
15245 : : struct mlx5_multi_pattern_segment *segment = NULL;
15246 : : struct mlx5dr_matcher *matcher = NULL;
15247 : : struct mlx5dr_action *jump = NULL;
15248 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
15249 : 0 : uint32_t i, selector = table->matcher_selector;
15250 : 0 : uint32_t other_selector = (selector + 1) & 1;
15251 : : int ret;
15252 : :
15253 [ # # ]: 0 : if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
15254 : 0 : return rte_flow_error_set(error, EINVAL,
15255 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15256 : : table, "no resizable attribute");
15257 [ # # ]: 0 : if (table->matcher_info[other_selector].matcher)
15258 : 0 : return rte_flow_error_set(error, EINVAL,
15259 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15260 : : table, "last table resize was not completed");
15261 [ # # ]: 0 : if (nb_flows <= table->cfg.attr.nb_flows)
15262 : 0 : return rte_flow_error_set(error, EINVAL,
15263 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15264 : : table, "shrinking table is not supported");
15265 : 0 : ret = mlx5_ipool_resize(table->flow_pool, nb_flows, error);
15266 [ # # ]: 0 : if (ret)
15267 : : return ret;
15268 : : /*
15269 : : * A resizable matcher doesn't support rule update. In this case, the ipool
15270 : : * for the resource is not created and there is no need to resize it.
15271 : : */
15272 : : MLX5_ASSERT(!table->resource);
15273 [ # # ]: 0 : if (mlx5_is_multi_pattern_active(&table->mpctx)) {
15274 : 0 : ret = flow_hw_table_resize_multi_pattern_actions(dev, table, nb_flows, error);
15275 [ # # ]: 0 : if (ret < 0)
15276 : : return ret;
15277 [ # # ]: 0 : if (ret > 0)
15278 : 0 : segment = table->mpctx.segments + ret;
15279 : : }
15280 [ # # ]: 0 : for (i = 0; i < table->nb_item_templates; i++)
15281 : 0 : mt[i] = table->its[i]->mt;
15282 [ # # ]: 0 : for (i = 0; i < table->nb_action_templates; i++)
15283 : 0 : at[i] = table->ats[i].action_template->tmpl;
15284 : : nb_flows = rte_align32pow2(nb_flows);
15285 : 0 : matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
15286 : 0 : matcher = mlx5dr_matcher_create(table->grp->tbl, mt,
15287 : : table->nb_item_templates, at,
15288 : : table->nb_action_templates,
15289 : : &matcher_attr);
15290 [ # # ]: 0 : if (!matcher) {
15291 : 0 : ret = rte_flow_error_set(error, rte_errno,
15292 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15293 : : table, "failed to create new matcher");
15294 : 0 : goto error;
15295 : : }
15296 [ # # ]: 0 : if (matcher_attr.isolated) {
15297 : 0 : jump_attr.matcher = matcher;
15298 : 0 : jump = mlx5dr_action_create_jump_to_matcher(priv->dr_ctx, &jump_attr,
15299 : 0 : mlx5_hw_act_flag[!!table->cfg.attr.flow_attr.group][table->type]);
15300 [ # # ]: 0 : if (!jump) {
15301 : 0 : ret = rte_flow_error_set(error, rte_errno,
15302 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15303 : : table, "failed to create jump to matcher action");
15304 : 0 : goto error;
15305 : : }
15306 : : }
15307 : 0 : rte_rwlock_write_lock(&table->matcher_replace_rwlk);
15308 : 0 : ret = mlx5dr_matcher_resize_set_target
15309 : : (table->matcher_info[selector].matcher, matcher);
15310 [ # # ]: 0 : if (ret) {
15311 : : rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
15312 : 0 : ret = rte_flow_error_set(error, rte_errno,
15313 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15314 : : table, "failed to initiate matcher swap");
15315 : 0 : goto error;
15316 : : }
15317 : 0 : table->cfg.attr.nb_flows = nb_flows;
15318 : 0 : table->matcher_info[other_selector].matcher = matcher;
15319 : 0 : table->matcher_info[other_selector].jump = jump;
15320 : 0 : table->matcher_selector = other_selector;
15321 : 0 : rte_atomic_store_explicit(&table->matcher_info[other_selector].refcnt,
15322 : : 0, rte_memory_order_relaxed);
15323 : : rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
15324 : 0 : return 0;
15325 : 0 : error:
15326 [ # # ]: 0 : if (segment)
15327 : 0 : mlx5_destroy_multi_pattern_segment(segment);
15328 [ # # ]: 0 : if (jump)
15329 : 0 : mlx5dr_action_destroy(jump);
15330 [ # # ]: 0 : if (matcher) {
15331 : 0 : ret = mlx5dr_matcher_destroy(matcher);
15332 : 0 : return rte_flow_error_set(error, rte_errno,
15333 : : RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
15334 : : table, "failed to destroy new matcher");
15335 : : }
15336 : : return ret;
15337 : : }
15338 : :
/**
 * Complete a template table resize: destroy the retired matcher once no
 * flow rule references it any longer.
 *
 * After flow_hw_table_resize(), `table->matcher_selector` points at the new
 * matcher while the other slot holds the retired one. This function may only
 * succeed after every rule has been moved to the new matcher (refcnt == 0).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param table
 *   Pointer to the template table being resized.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success. Negative value otherwise and @p error is set:
 *   EINVAL if the table is not resizable or no resize was started,
 *   EBUSY if some rules still reference the retired matcher.
 */
static int
flow_hw_table_resize_complete(struct rte_eth_dev *dev,
			      struct rte_flow_template_table *table,
			      struct rte_flow_error *error)
{
	int ret;
	uint32_t selector = table->matcher_selector;
	uint32_t other_selector = (selector + 1) & 1;
	/* The slot NOT currently selected holds the retired matcher. */
	struct mlx5_matcher_info *matcher_info = &table->matcher_info[other_selector];
	uint32_t matcher_refcnt;

	if (!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  table, "no resizable attribute");
	if (matcher_info->matcher == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  table, "table resize was not started");
	/* Relaxed load: only a zero refcnt (no remaining users) permits teardown. */
	matcher_refcnt = rte_atomic_load_explicit(&matcher_info->refcnt,
						  rte_memory_order_relaxed);
	if (matcher_refcnt > 0)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  table, "all rules not yet updated");
	/* Destroy the retired jump action (if any) before its matcher. */
	if (matcher_info->jump)
		mlx5dr_action_destroy(matcher_info->jump);
	ret = mlx5dr_matcher_destroy(matcher_info->matcher);
	if (ret)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  table, "failed to destroy retired matcher");
	/* Mark the slot empty so a subsequent resize can reuse it. */
	matcher_info->matcher = NULL;
	return 0;
}
15374 : :
/**
 * Move a single flow rule onto the new matcher of its resized template table.
 *
 * Driver callback behind the async "flow update resized" operation: it is
 * called after flow_hw_table_resize() and before
 * flow_hw_table_resize_complete() for each rule created on the old matcher.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param queue
 *   Flow queue used for the async operation.
 * @param attr
 *   Async operation attributes (postpone controls burst completion).
 * @param flow
 *   Flow rule to move (a struct rte_flow_hw underneath).
 * @param user_data
 *   User data reported back in the operation completion.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, negative value otherwise and @p error is set.
 */
static int
flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,
		       const struct rte_flow_op_attr *attr,
		       struct rte_flow *flow, void *user_data,
		       struct rte_flow_error *error)
{
	int ret;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
	struct rte_flow_template_table *table = hw_flow->table;
	struct rte_flow_hw_aux *aux = mlx5_flow_hw_aux(dev->data->port_id, hw_flow);
	uint32_t table_selector = table->matcher_selector;
	uint32_t rule_selector = aux->matcher_selector;
	uint32_t other_selector;
	struct mlx5dr_matcher *other_matcher;
	struct mlx5dr_rule_attr rule_attr = {
		.queue_id = queue,
		.burst = attr->postpone,
	};

	MLX5_ASSERT(hw_flow->flags & MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR);
	/*
	 * Update resized can be called only through async flow API.
	 * These rings are allocated if and only if async flow API was configured.
	 */
	MLX5_ASSERT(priv->hw_q[queue].flow_transfer_completed != NULL);
	MLX5_ASSERT(priv->hw_q[queue].flow_transfer_pending != NULL);
	/**
	 * mlx5dr_matcher_resize_rule_move() accepts original table matcher -
	 * the one that was used BEFORE table resize.
	 * Since the function is called AFTER table resize,
	 * `table->matcher_selector` always points to the new matcher and
	 * `aux->matcher_selector` points to a matcher used to create the flow.
	 */
	other_selector = rule_selector == table_selector ?
			 (rule_selector + 1) & 1 : rule_selector;
	other_matcher = table->matcher_info[other_selector].matcher;
	if (!other_matcher)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "no active table resize");
	hw_flow->operation_type = MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE;
	hw_flow->user_data = user_data;
	rule_attr.user_data = hw_flow;
	if (rule_selector == table_selector) {
		/*
		 * The rule already sits on the currently selected matcher:
		 * no HW move is issued, only a completion is enqueued so the
		 * application still observes the operation result on pull.
		 */
		struct rte_ring *ring = !attr->postpone ?
					priv->hw_q[queue].flow_transfer_completed :
					priv->hw_q[queue].flow_transfer_pending;
		rte_ring_enqueue(ring, hw_flow);
		flow_hw_q_inc_flow_ops(priv, queue);
		return 0;
	}
	/* Ask HW steering to move the rule from the old matcher. */
	ret = mlx5dr_matcher_resize_rule_move(other_matcher,
					      (struct mlx5dr_rule *)hw_flow->rule,
					      &rule_attr);
	if (ret) {
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "flow transfer failed");
	}
	flow_hw_q_inc_flow_ops(priv, queue);
	return 0;
}
15438 : :
15439 : : /**
15440 : : * Internal validation function. For validating both actions and items.
15441 : : *
15442 : : * @param[in] dev
15443 : : * Pointer to the rte_eth_dev structure.
15444 : : * @param[in] attr
15445 : : * Pointer to the flow attributes.
15446 : : * @param[in] items
15447 : : * Pointer to the list of items.
15448 : : * @param[in] actions
15449 : : * Pointer to the list of actions.
15450 : : * @param[in] external
15451 : : * This flow rule is created by request external to PMD.
15452 : : * @param[in] hairpin
15453 : : * Number of hairpin TX actions, 0 means classic flow.
15454 : : * @param[out] error
15455 : : * Pointer to the error structure.
15456 : : *
15457 : : * @return
15458 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
15459 : : */
15460 : : static int
15461 : 0 : flow_hw_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
15462 : : const struct rte_flow_item items[],
15463 : : const struct rte_flow_action actions[] __rte_unused,
15464 : : bool external, int hairpin __rte_unused,
15465 : : struct rte_flow_error *error)
15466 : : {
15467 : 0 : const struct rte_flow_pattern_template_attr pattern_template_attr = {
15468 : : .relaxed_matching = 0,
15469 : 0 : .ingress = attr->ingress,
15470 : 0 : .egress = attr->egress,
15471 : 0 : .transfer = attr->transfer,
15472 : : };
15473 : 0 : uint64_t item_flags = 0;
15474 : : int ret = 0;
15475 : :
15476 [ # # ]: 0 : if (external) {
15477 : : /* Validate application items only */
15478 : 0 : ret = __flow_hw_pattern_validate(dev, &pattern_template_attr, items,
15479 : : &item_flags, true, error);
15480 [ # # ]: 0 : if (ret < 0)
15481 : 0 : return -rte_errno;
15482 : : }
15483 : : return 0;
15484 : : }
15485 : :
15486 : :
/**
 * Flow driver operations table for HW steering (template API) mode.
 * Wired up when the port is configured with dv_flow_en=2.
 */
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
	/* Flow list management and synchronous validation. */
	.list_create = flow_hw_list_create,
	.list_destroy = mlx5_flow_hw_list_destroy,
	.validate = flow_hw_validate,
	.info_get = flow_hw_info_get,
	.configure = flow_hw_configure,
	/* Pattern/actions template and template table management. */
	.pattern_validate = flow_hw_pattern_validate,
	.pattern_template_create = flow_hw_external_pattern_template_create,
	.pattern_template_destroy = flow_hw_pattern_template_destroy,
	.actions_validate = flow_hw_actions_validate,
	.actions_template_create = flow_hw_actions_template_create,
	.actions_template_destroy = flow_hw_actions_template_destroy,
	.template_table_create = flow_hw_template_table_create,
	.template_table_destroy = flow_hw_table_destroy,
	.table_resize = flow_hw_table_resize,
	.group_set_miss_actions = mlx5_flow_hw_group_set_miss_actions,
	/* Asynchronous (queue-based) flow rule operations. */
	.async_flow_create = flow_hw_async_flow_create,
	.async_flow_create_by_index = flow_hw_async_flow_create_by_index,
	.async_flow_update = flow_hw_async_flow_update,
	.async_flow_destroy = flow_hw_async_flow_destroy,
	.flow_update_resized = flow_hw_update_resized,
	.table_resize_complete = flow_hw_table_resize_complete,
	.pull = flow_hw_pull,
	.push = flow_hw_push,
	/* Indirect action handles (sync and async variants). */
	.async_action_create = flow_hw_action_handle_create,
	.async_action_destroy = flow_hw_action_handle_destroy,
	.async_action_update = flow_hw_action_handle_update,
	.async_action_query_update = flow_hw_async_action_handle_query_update,
	.async_action_query = flow_hw_action_handle_query,
	.action_validate = flow_hw_action_validate,
	.action_create = flow_hw_action_create,
	.action_destroy = flow_hw_action_destroy,
	.action_update = flow_hw_action_update,
	.action_query = flow_hw_action_query,
	.action_query_update = flow_hw_action_query_update,
	.action_list_handle_create = flow_hw_action_list_handle_create,
	.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
	.action_list_handle_query_update =
		flow_hw_action_list_handle_query_update,
	.async_action_list_handle_create =
		flow_hw_async_action_list_handle_create,
	.async_action_list_handle_destroy =
		flow_hw_async_action_list_handle_destroy,
	.async_action_list_handle_query_update =
		flow_hw_async_action_list_handle_query_update,
	/* Queries, aging, flex items and hash calculation helpers. */
	.query = flow_hw_query,
	.get_aged_flows = flow_hw_get_aged_flows,
	.get_q_aged_flows = flow_hw_get_q_aged_flows,
	.item_create = mlx5_flow_dv_item_create,
	.item_release = mlx5_flow_dv_item_release,
	.flow_calc_table_hash = flow_hw_calc_table_hash,
	.flow_calc_encap_hash = flow_hw_calc_encap_hash,
};
15540 : :
15541 : : /**
15542 : : * Creates a control flow using flow template API on @p proxy_dev device,
15543 : : * on behalf of @p owner_dev device.
15544 : : *
15545 : : * This function uses locks internally to synchronize access to the
15546 : : * flow queue.
15547 : : *
15548 : : * Created flow is stored in private list associated with @p proxy_dev device.
15549 : : *
15550 : : * @param owner_dev
15551 : : * Pointer to Ethernet device on behalf of which flow is created.
15552 : : * @param proxy_dev
15553 : : * Pointer to Ethernet device on which flow is created.
15554 : : * @param table
15555 : : * Pointer to flow table.
15556 : : * @param items
15557 : : * Pointer to flow rule items.
15558 : : * @param item_template_idx
15559 : : * Index of an item template associated with @p table.
15560 : : * @param actions
15561 : : * Pointer to flow rule actions.
15562 : : * @param action_template_idx
15563 : : * Index of an action template associated with @p table.
15564 : : * @param info
15565 : : * Additional info about control flow rule.
15566 : : * @param external
15567 : : * External ctrl flow.
15568 : : *
15569 : : * @return
15570 : : * 0 on success, negative errno value otherwise and rte_errno set.
15571 : : */
15572 : : static __rte_unused int
15573 : 0 : flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
15574 : : struct rte_eth_dev *proxy_dev,
15575 : : struct rte_flow_template_table *table,
15576 : : struct rte_flow_item items[],
15577 : : uint8_t item_template_idx,
15578 : : struct rte_flow_action actions[],
15579 : : uint8_t action_template_idx,
15580 : : struct mlx5_ctrl_flow_info *info,
15581 : : bool external)
15582 : : {
15583 : 0 : struct mlx5_priv *priv = proxy_dev->data->dev_private;
15584 : 0 : uint32_t queue = CTRL_QUEUE_ID(priv);
15585 : 0 : struct rte_flow_op_attr op_attr = {
15586 : : .postpone = 0,
15587 : : };
15588 : : struct rte_flow *flow = NULL;
15589 : : struct mlx5_ctrl_flow_entry *entry = NULL;
15590 : : int ret;
15591 : :
15592 : 0 : rte_spinlock_lock(&priv->hw_ctrl_lock);
15593 : 0 : entry = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_SYS, sizeof(*entry),
15594 : : 0, SOCKET_ID_ANY);
15595 [ # # ]: 0 : if (!entry) {
15596 : 0 : DRV_LOG(ERR, "port %u not enough memory to create control flows",
15597 : : proxy_dev->data->port_id);
15598 : 0 : rte_errno = ENOMEM;
15599 : : ret = -rte_errno;
15600 : 0 : goto error;
15601 : : }
15602 : 0 : flow = flow_hw_async_flow_create(proxy_dev, queue, &op_attr, table,
15603 : : items, item_template_idx,
15604 : : actions, action_template_idx,
15605 : : NULL, NULL);
15606 [ # # ]: 0 : if (!flow) {
15607 : 0 : DRV_LOG(ERR, "port %u failed to enqueue create control"
15608 : : " flow operation", proxy_dev->data->port_id);
15609 : 0 : ret = -rte_errno;
15610 : 0 : goto error;
15611 : : }
15612 : 0 : ret = __flow_hw_pull_comp(proxy_dev, queue, NULL);
15613 [ # # ]: 0 : if (ret) {
15614 : 0 : DRV_LOG(ERR, "port %u failed to insert control flow",
15615 : : proxy_dev->data->port_id);
15616 : 0 : rte_errno = EINVAL;
15617 : : ret = -rte_errno;
15618 : 0 : goto error;
15619 : : }
15620 : 0 : entry->owner_dev = owner_dev;
15621 : 0 : entry->flow = flow;
15622 [ # # ]: 0 : if (info)
15623 : 0 : entry->info = *info;
15624 : : else
15625 : 0 : entry->info.type = MLX5_CTRL_FLOW_TYPE_GENERAL;
15626 [ # # ]: 0 : if (external)
15627 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
15628 : : else
15629 [ # # ]: 0 : LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
15630 : : rte_spinlock_unlock(&priv->hw_ctrl_lock);
15631 : 0 : return 0;
15632 : 0 : error:
15633 [ # # ]: 0 : if (entry)
15634 : 0 : mlx5_free(entry);
15635 : : rte_spinlock_unlock(&priv->hw_ctrl_lock);
15636 : 0 : return ret;
15637 : : }
15638 : :
15639 : : /**
15640 : : * Destroys a control flow @p flow using flow template API on @p dev device.
15641 : : *
15642 : : * This function uses locks internally to synchronize access to the
15643 : : * flow queue.
15644 : : *
15645 : : * If the @p flow is stored on any private list/pool, then caller must free up
15646 : : * the relevant resources.
15647 : : *
15648 : : * @param dev
15649 : : * Pointer to Ethernet device.
15650 : : * @param flow
15651 : : * Pointer to flow rule.
15652 : : *
15653 : : * @return
15654 : : * 0 on success, non-zero value otherwise.
15655 : : */
15656 : : static int
15657 : 0 : flow_hw_destroy_ctrl_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
15658 : : {
15659 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
15660 : 0 : uint32_t queue = CTRL_QUEUE_ID(priv);
15661 : 0 : struct rte_flow_op_attr op_attr = {
15662 : : .postpone = 0,
15663 : : };
15664 : : int ret;
15665 : :
15666 : 0 : rte_spinlock_lock(&priv->hw_ctrl_lock);
15667 : 0 : ret = flow_hw_async_flow_destroy(dev, queue, &op_attr, flow, NULL, NULL);
15668 [ # # ]: 0 : if (ret) {
15669 : 0 : DRV_LOG(ERR, "port %u failed to enqueue destroy control"
15670 : : " flow operation", dev->data->port_id);
15671 : 0 : goto exit;
15672 : : }
15673 : 0 : ret = __flow_hw_pull_comp(dev, queue, NULL);
15674 [ # # ]: 0 : if (ret) {
15675 : 0 : DRV_LOG(ERR, "port %u failed to destroy control flow",
15676 : : dev->data->port_id);
15677 : 0 : rte_errno = EINVAL;
15678 : : ret = -rte_errno;
15679 : 0 : goto exit;
15680 : : }
15681 : 0 : exit:
15682 : : rte_spinlock_unlock(&priv->hw_ctrl_lock);
15683 : 0 : return ret;
15684 : : }
15685 : :
15686 : : /**
15687 : : * Destroys control flows created on behalf of @p owner device on @p dev device.
15688 : : *
15689 : : * @param dev
15690 : : * Pointer to Ethernet device on which control flows were created.
15691 : : * @param owner
15692 : : * Pointer to Ethernet device owning control flows.
15693 : : *
15694 : : * @return
15695 : : * 0 on success, otherwise negative error code is returned and
15696 : : * rte_errno is set.
15697 : : */
15698 : : static int
15699 : 0 : flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
15700 : : {
15701 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
15702 : : struct mlx5_ctrl_flow_entry *cf;
15703 : : struct mlx5_ctrl_flow_entry *cf_next;
15704 : : int ret;
15705 : :
15706 : 0 : cf = LIST_FIRST(&priv->hw_ctrl_flows);
15707 [ # # ]: 0 : while (cf != NULL) {
15708 : 0 : cf_next = LIST_NEXT(cf, next);
15709 [ # # ]: 0 : if (cf->owner_dev == owner) {
15710 : 0 : ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15711 [ # # ]: 0 : if (ret) {
15712 : 0 : rte_errno = ret;
15713 : 0 : return -ret;
15714 : : }
15715 [ # # ]: 0 : LIST_REMOVE(cf, next);
15716 : 0 : mlx5_free(cf);
15717 : : }
15718 : : cf = cf_next;
15719 : : }
15720 : : return 0;
15721 : : }
15722 : :
15723 : : /**
15724 : : * Destroys control flows created for @p owner_dev device.
15725 : : *
15726 : : * @param owner_dev
15727 : : * Pointer to Ethernet device owning control flows.
15728 : : *
15729 : : * @return
15730 : : * 0 on success, otherwise negative error code is returned and
15731 : : * rte_errno is set.
15732 : : */
15733 : : int
15734 : 0 : mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *owner_dev)
15735 : : {
15736 : 0 : struct mlx5_priv *owner_priv = owner_dev->data->dev_private;
15737 : : struct rte_eth_dev *proxy_dev;
15738 : 0 : uint16_t owner_port_id = owner_dev->data->port_id;
15739 : 0 : uint16_t proxy_port_id = owner_dev->data->port_id;
15740 : : int ret;
15741 : :
15742 : : /* Flush all flows created by this port for itself. */
15743 : 0 : ret = flow_hw_flush_ctrl_flows_owned_by(owner_dev, owner_dev);
15744 [ # # ]: 0 : if (ret)
15745 : : return ret;
15746 : : /* Flush all flows created for this port on proxy port. */
15747 [ # # ]: 0 : if (owner_priv->sh->config.dv_esw_en) {
15748 : 0 : ret = rte_flow_pick_transfer_proxy(owner_port_id, &proxy_port_id, NULL);
15749 [ # # ]: 0 : if (ret == -ENODEV) {
15750 : 0 : DRV_LOG(DEBUG, "Unable to find transfer proxy port for port %u. It was "
15751 : : "probably closed. Control flows were cleared.",
15752 : : owner_port_id);
15753 : 0 : rte_errno = 0;
15754 : 0 : return 0;
15755 [ # # ]: 0 : } else if (ret) {
15756 : 0 : DRV_LOG(ERR, "Unable to find proxy port for port %u (ret = %d)",
15757 : : owner_port_id, ret);
15758 : 0 : return ret;
15759 : : }
15760 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
15761 : : } else {
15762 : : proxy_dev = owner_dev;
15763 : : }
15764 : 0 : return flow_hw_flush_ctrl_flows_owned_by(proxy_dev, owner_dev);
15765 : : }
15766 : :
15767 : : /**
15768 : : * Destroys all control flows created on @p dev device.
15769 : : *
15770 : : * @param owner_dev
15771 : : * Pointer to Ethernet device.
15772 : : *
15773 : : * @return
15774 : : * 0 on success, otherwise negative error code is returned and
15775 : : * rte_errno is set.
15776 : : */
15777 : : static int
15778 : 0 : flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
15779 : : {
15780 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
15781 : : struct mlx5_ctrl_flow_entry *cf;
15782 : : struct mlx5_ctrl_flow_entry *cf_next;
15783 : : int ret;
15784 : :
15785 : 0 : cf = LIST_FIRST(&priv->hw_ctrl_flows);
15786 [ # # ]: 0 : while (cf != NULL) {
15787 : 0 : cf_next = LIST_NEXT(cf, next);
15788 : 0 : ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15789 [ # # ]: 0 : if (ret) {
15790 : 0 : rte_errno = ret;
15791 : 0 : return -ret;
15792 : : }
15793 [ # # ]: 0 : LIST_REMOVE(cf, next);
15794 : 0 : mlx5_free(cf);
15795 : : cf = cf_next;
15796 : : }
15797 : 0 : cf = LIST_FIRST(&priv->hw_ext_ctrl_flows);
15798 [ # # ]: 0 : while (cf != NULL) {
15799 : 0 : cf_next = LIST_NEXT(cf, next);
15800 : 0 : ret = flow_hw_destroy_ctrl_flow(dev, cf->flow);
15801 [ # # ]: 0 : if (ret) {
15802 : 0 : rte_errno = ret;
15803 : 0 : return -ret;
15804 : : }
15805 [ # # ]: 0 : LIST_REMOVE(cf, next);
15806 : 0 : mlx5_free(cf);
15807 : : cf = cf_next;
15808 : : }
15809 : : return 0;
15810 : : }
15811 : :
/**
 * Creates the pair of default SQ miss flow rules for @p sqn on the transfer
 * proxy port of @p dev:
 *
 * 1. A root rule matching E-Switch Manager traffic from the SQ and jumping
 *    to group 1 (with a REG_C_0 marker set via MODIFY_FIELD).
 * 2. A non-root rule matching the REG_C_0 marker and the SQ, forwarding
 *    to the represented port.
 *
 * @param dev
 *   Pointer to Ethernet device for which the rules are created.
 * @param sqn
 *   Send queue number to match on.
 * @param external
 *   If true, rules are tracked on the external control flow list.
 *
 * @return
 *   0 on success (including the no-op case when the proxy port is not yet
 *   configured for HWS), negative errno value otherwise.
 */
int
mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
{
	uint16_t port_id = dev->data->port_id;
	struct rte_flow_item_ethdev esw_mgr_spec = {
		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
	};
	struct rte_flow_item_ethdev esw_mgr_mask = {
		.port_id = MLX5_REPRESENTED_PORT_ESW_MGR,
	};
	struct rte_flow_item_tag reg_c0_spec = {
		.index = (uint8_t)REG_C_0,
		.data = flow_hw_esw_mgr_regc_marker(dev),
	};
	struct rte_flow_item_tag reg_c0_mask = {
		.index = 0xff,
		.data = flow_hw_esw_mgr_regc_marker_mask(dev),
	};
	struct mlx5_rte_flow_item_sq sq_spec = {
		.queue = sqn,
	};
	struct rte_flow_action_ethdev port = {
		.port_id = port_id,
	};
	/* Items/actions arrays are reused for both the root and non-root rule. */
	struct rte_flow_item items[3] = { { 0 } };
	struct rte_flow_action actions[3] = { { 0 } };
	struct mlx5_ctrl_flow_info flow_info = {
		.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
		.esw_mgr_sq = sqn,
	};
	struct rte_eth_dev *proxy_dev;
	struct mlx5_priv *proxy_priv;
	uint16_t proxy_port_id = dev->data->port_id;
	int ret;

	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
	if (ret) {
		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
			"port must be present to create default SQ miss flows.",
			port_id);
		return ret;
	}
	proxy_dev = &rte_eth_devices[proxy_port_id];
	proxy_priv = proxy_dev->data->dev_private;
	if (!proxy_priv->dr_ctx) {
		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
			"for HWS to create default SQ miss flows. Default flows will "
			"not be created.",
			proxy_port_id, port_id);
		return 0;
	}
	if (!proxy_priv->hw_ctrl_fdb ||
	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) {
		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
			"default flow tables were not created.",
			proxy_port_id, port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/*
	 * Create a root SQ miss flow rule - match E-Switch Manager and SQ,
	 * and jump to group 1.
	 */
	items[0] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
		.spec = &esw_mgr_spec,
		.mask = &esw_mgr_mask,
	};
	items[1] = (struct rte_flow_item){
		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
		.spec = &sq_spec,
	};
	items[2] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_END,
	};
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
	};
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_JUMP,
	};
	actions[2] = (struct rte_flow_action) {
		.type = RTE_FLOW_ACTION_TYPE_END,
	};
	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl,
				       items, 0, actions, 0, &flow_info, external);
	if (ret) {
		DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d",
			port_id, sqn, ret);
		return ret;
	}
	/*
	 * Create a non-root SQ miss flow rule - match REG_C_0 marker and SQ,
	 * and forward to port.
	 */
	items[0] = (struct rte_flow_item){
		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
		.spec = &reg_c0_spec,
		.mask = &reg_c0_mask,
	};
	items[1] = (struct rte_flow_item){
		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
		.spec = &sq_spec,
	};
	items[2] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_END,
	};
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
		.conf = &port,
	};
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END,
	};
	flow_info.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS;
	ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
				       proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl,
				       items, 0, actions, 0, &flow_info, external);
	if (ret) {
		DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d",
			port_id, sqn, ret);
		return ret;
	}
	return 0;
}
15939 : :
15940 : : static bool
15941 : : flow_hw_is_matching_sq_miss_flow(struct mlx5_ctrl_flow_entry *cf,
15942 : : struct rte_eth_dev *dev,
15943 : : uint32_t sqn)
15944 : : {
15945 : 0 : if (cf->owner_dev != dev)
15946 : : return false;
15947 [ # # # # ]: 0 : if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
15948 : : return true;
15949 [ # # # # ]: 0 : if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
15950 : : return true;
15951 : : return false;
15952 : : }
15953 : :
/**
 * Destroys the default SQ miss flow rules (root and non-root) created for
 * @p sqn on behalf of @p dev on its transfer proxy port.
 *
 * @param dev
 *   Pointer to Ethernet device which owns the rules.
 * @param sqn
 *   Send queue number the rules were created for.
 * @param external
 *   If true, scan the external control flow list, otherwise the internal one.
 *
 * @return
 *   0 on success (including the no-op case when the proxy was never
 *   configured), negative errno value if the proxy port cannot be found.
 */
int
mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
{
	uint16_t port_id = dev->data->port_id;
	uint16_t proxy_port_id = dev->data->port_id;
	struct rte_eth_dev *proxy_dev;
	struct mlx5_priv *proxy_priv;
	struct mlx5_ctrl_flow_entry *cf;
	struct mlx5_ctrl_flow_entry *cf_next;
	int ret;

	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
	if (ret) {
		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
			"port must be present for default SQ miss flow rules to exist.",
			port_id);
		return ret;
	}
	proxy_dev = &rte_eth_devices[proxy_port_id];
	proxy_priv = proxy_dev->data->dev_private;
	/* FDB default flow rules must be enabled. */
	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
	/* Nothing to destroy when the proxy was never configured for HWS. */
	if (!proxy_priv->dr_ctx)
		return 0;
	if (!proxy_priv->hw_ctrl_fdb ||
	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
	    !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl)
		return 0;
	cf = external ? LIST_FIRST(&proxy_priv->hw_ext_ctrl_flows) :
			LIST_FIRST(&proxy_priv->hw_ctrl_flows);
	/* Entries are unlinked during the walk - keep a saved next pointer. */
	while (cf != NULL) {
		cf_next = LIST_NEXT(cf, next);
		if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) {
			claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
			LIST_REMOVE(cf, next);
			mlx5_free(cf);
		}
		cf = cf_next;
	}
	return 0;
}
15995 : :
/**
 * Creates the default FDB jump rule for @p dev on its transfer proxy port:
 * match traffic from the represented port and jump to group 1.
 *
 * @param dev
 *   Pointer to Ethernet device for which the rule is created.
 *
 * @return
 *   0 on success (including the no-op case when the proxy port is not yet
 *   configured for HWS), negative errno value otherwise.
 */
int
mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
{
	uint16_t port_id = dev->data->port_id;
	struct rte_flow_item_ethdev port_spec = {
		.port_id = port_id,
	};
	struct rte_flow_item items[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
			.spec = &port_spec,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	struct rte_flow_action_jump jump = {
		.group = 1,
	};
	struct rte_flow_action actions[] = {
		{
			.type = RTE_FLOW_ACTION_TYPE_JUMP,
			.conf = &jump,
		},
		{
			.type = RTE_FLOW_ACTION_TYPE_END,
		}
	};
	struct mlx5_ctrl_flow_info flow_info = {
		.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP,
	};
	struct rte_eth_dev *proxy_dev;
	struct mlx5_priv *proxy_priv;
	uint16_t proxy_port_id = dev->data->port_id;
	int ret;

	ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
	if (ret) {
		DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
			"port must be present to create default FDB jump rule.",
			port_id);
		return ret;
	}
	proxy_dev = &rte_eth_devices[proxy_port_id];
	proxy_priv = proxy_dev->data->dev_private;
	/* FDB default flow rules must be enabled. */
	MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule);
	if (!proxy_priv->dr_ctx) {
		DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured "
			"for HWS to create default FDB jump rule. Default rule will "
			"not be created.",
			proxy_port_id, port_id);
		return 0;
	}
	if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) {
		DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but "
			"default flow tables were not created.",
			proxy_port_id, port_id);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return flow_hw_create_ctrl_flow(dev, proxy_dev,
					proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl,
					items, 0, actions, 0, &flow_info, false);
}
16061 : :
16062 : : int
16063 : 0 : mlx5_flow_hw_create_nic_tx_default_mreg_copy_flow(struct rte_eth_dev *dev, uint32_t sqn)
16064 : : {
16065 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
16066 : 0 : struct mlx5_rte_flow_item_sq sq_spec = {
16067 : : .queue = sqn,
16068 : : };
16069 : 0 : struct rte_flow_item items[] = {
16070 : : {
16071 : : .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
16072 : : .spec = &sq_spec,
16073 : : },
16074 : : {
16075 : : .type = RTE_FLOW_ITEM_TYPE_END,
16076 : : },
16077 : : };
16078 : 0 : struct rte_flow_action_modify_field mreg_action = {
16079 : : .operation = RTE_FLOW_MODIFY_SET,
16080 : : .dst = {
16081 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
16082 : : .tag_index = REG_C_1,
16083 : : },
16084 : : .src = {
16085 : : .field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
16086 : : .tag_index = REG_A,
16087 : : },
16088 : : .width = 32,
16089 : : };
16090 : 0 : struct rte_flow_action copy_reg_action[] = {
16091 : : [0] = {
16092 : : .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
16093 : : .conf = &mreg_action,
16094 : : },
16095 : : [1] = {
16096 : : .type = RTE_FLOW_ACTION_TYPE_JUMP,
16097 : : },
16098 : : [2] = {
16099 : : .type = RTE_FLOW_ACTION_TYPE_END,
16100 : : },
16101 : : };
16102 : 0 : struct mlx5_ctrl_flow_info flow_info = {
16103 : : .type = MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
16104 : : .tx_repr_sq = sqn,
16105 : : };
16106 : :
16107 : 0 : return flow_hw_create_ctrl_flow(dev, dev,
16108 : 0 : priv->hw_ctrl_nic->hw_tx_meta_cpy_tbl,
16109 : : items, 0, copy_reg_action, 0, &flow_info, false);
16110 : : }
16111 : :
/**
 * Creates the default egress (Tx representor matching) rule for @p sqn:
 * matches traffic from the SQ and jumps to the tagging table's next group,
 * optionally preceded by a metadata MODIFY_FIELD action when extended
 * metadata mode (META32_HWS) is enabled.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param sqn
 *   Send queue number to match on.
 * @param external
 *   If true, the rule is tracked on the external control flow list.
 *
 * @return
 *   0 on success (including the no-op case when the port is not configured
 *   for HWS), negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_hw_create_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rte_flow_item_sq sq_spec = {
		.queue = sqn,
	};
	struct rte_flow_item items[] = {
		{
			.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
			.spec = &sq_spec,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
	};
	/*
	 * Allocate actions array suitable for all cases - extended metadata enabled or not.
	 * With extended metadata there will be an additional MODIFY_FIELD action before JUMP.
	 */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct mlx5_ctrl_flow_info flow_info = {
		.type = MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH,
		.tx_repr_sq = sqn,
	};

	if (!priv->dr_ctx) {
		DRV_LOG(DEBUG, "Port %u must be configured for HWS, before creating "
			"default egress flow rules. Omitting creation.",
			dev->data->port_id);
		return 0;
	}
	if (!priv->hw_tx_repr_tagging_tbl) {
		DRV_LOG(ERR, "Port %u is configured for HWS, but table for default "
			"egress flow rules does not exist.",
			dev->data->port_id);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/*
	 * If extended metadata mode is enabled, then an additional MODIFY_FIELD action must be
	 * placed before terminating JUMP action.
	 */
	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
		actions[1].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD;
		actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP;
	}
	return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl,
					items, 0, actions, 0, &flow_info, external);
}
16167 : :
16168 : : static bool
16169 : : flow_hw_is_tx_matching_repr_matching_flow(struct mlx5_ctrl_flow_entry *cf,
16170 : : struct rte_eth_dev *dev,
16171 : : uint32_t sqn)
16172 : : {
16173 : 0 : if (cf->owner_dev != dev)
16174 : : return false;
16175 [ # # # # ]: 0 : if (cf->info.type == MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH && cf->info.tx_repr_sq == sqn)
16176 : : return true;
16177 : : return false;
16178 : : }
16179 : :
16180 : : int
16181 : 0 : mlx5_flow_hw_destroy_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
16182 : : {
16183 : 0 : uint16_t port_id = dev->data->port_id;
16184 : 0 : uint16_t proxy_port_id = dev->data->port_id;
16185 : : struct rte_eth_dev *proxy_dev;
16186 : : struct mlx5_priv *proxy_priv;
16187 : : struct mlx5_ctrl_flow_entry *cf;
16188 : : struct mlx5_ctrl_flow_entry *cf_next;
16189 : : int ret;
16190 : :
16191 : 0 : ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
16192 [ # # ]: 0 : if (ret) {
16193 : 0 : DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
16194 : : "port must be present for default SQ miss flow rules to exist.",
16195 : : port_id);
16196 : 0 : return ret;
16197 : : }
16198 : 0 : proxy_dev = &rte_eth_devices[proxy_port_id];
16199 : 0 : proxy_priv = proxy_dev->data->dev_private;
16200 [ # # ]: 0 : if (!proxy_priv->dr_ctx ||
16201 [ # # ]: 0 : !proxy_priv->hw_tx_repr_tagging_tbl)
16202 : : return 0;
16203 [ # # ]: 0 : cf = external ? LIST_FIRST(&proxy_priv->hw_ext_ctrl_flows) :
16204 : : LIST_FIRST(&proxy_priv->hw_ctrl_flows);
16205 [ # # ]: 0 : while (cf != NULL) {
16206 [ # # ]: 0 : cf_next = LIST_NEXT(cf, next);
16207 : : if (flow_hw_is_tx_matching_repr_matching_flow(cf, dev, sqn)) {
16208 : 0 : claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
16209 [ # # ]: 0 : LIST_REMOVE(cf, next);
16210 : 0 : mlx5_free(cf);
16211 : : }
16212 : : cf = cf_next;
16213 : : }
16214 : : return 0;
16215 : : }
16216 : :
16217 : : int
16218 : 0 : mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
16219 : : {
16220 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
16221 : 0 : struct rte_flow_item_eth lacp_item = {
16222 : : .type = RTE_BE16(RTE_ETHER_TYPE_SLOW),
16223 : : };
16224 : 0 : struct rte_flow_item eth_lacp[] = {
16225 : : [0] = {
16226 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
16227 : : .spec = &lacp_item,
16228 : : .mask = &lacp_item,
16229 : : },
16230 : : [1] = {
16231 : : .type = RTE_FLOW_ITEM_TYPE_END,
16232 : : },
16233 : : };
16234 : 0 : struct rte_flow_action miss_action[] = {
16235 : : [0] = {
16236 : : .type = (enum rte_flow_action_type)
16237 : : MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
16238 : : },
16239 : : [1] = {
16240 : : .type = RTE_FLOW_ACTION_TYPE_END,
16241 : : },
16242 : : };
16243 : 0 : struct mlx5_ctrl_flow_info flow_info = {
16244 : : .type = MLX5_CTRL_FLOW_TYPE_LACP_RX,
16245 : : };
16246 : :
16247 [ # # # # : 0 : if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl)
# # ]
16248 : : return 0;
16249 : 0 : return flow_hw_create_ctrl_flow(dev, dev,
16250 : : priv->hw_ctrl_fdb->hw_lacp_rx_tbl,
16251 : : eth_lacp, 0, miss_action, 0, &flow_info, false);
16252 : : }
16253 : :
16254 : : static uint32_t
16255 : : __calc_pattern_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
16256 : : {
16257 : : switch (eth_pattern_type) {
16258 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
16259 : : return MLX5_CTRL_PROMISCUOUS;
16260 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
16261 : : return MLX5_CTRL_ALL_MULTICAST;
16262 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
16263 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
16264 : : return MLX5_CTRL_BROADCAST;
16265 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
16266 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
16267 : : return MLX5_CTRL_IPV4_MULTICAST;
16268 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
16269 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
16270 : : return MLX5_CTRL_IPV6_MULTICAST;
16271 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
16272 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
16273 : : return MLX5_CTRL_DMAC;
16274 : : default:
16275 : : /* Should not reach here. */
16276 : : MLX5_ASSERT(false);
16277 : : return 0;
16278 : : }
16279 : : }
16280 : :
16281 : : static uint32_t
16282 : : __calc_vlan_flags(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type)
16283 : : {
16284 [ # # ]: 0 : switch (eth_pattern_type) {
16285 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
16286 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
16287 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
16288 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
16289 : : return MLX5_CTRL_VLAN_FILTER;
16290 : 0 : default:
16291 : 0 : return 0;
16292 : : }
16293 : : }
16294 : :
16295 : : static bool
16296 [ # # ]: 0 : eth_pattern_type_is_requested(const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
16297 : : uint32_t flags)
16298 : : {
16299 : : uint32_t pattern_flags = __calc_pattern_flags(eth_pattern_type);
16300 : : uint32_t vlan_flags = __calc_vlan_flags(eth_pattern_type);
16301 : 0 : bool pattern_requested = !!(pattern_flags & flags);
16302 [ # # # # ]: 0 : bool consider_vlan = vlan_flags || (MLX5_CTRL_VLAN_FILTER & flags);
16303 : 0 : bool vlan_requested = !!(vlan_flags & flags);
16304 : :
16305 [ # # ]: 0 : if (consider_vlan)
16306 : 0 : return pattern_requested && vlan_requested;
16307 : : else
16308 : : return pattern_requested;
16309 : : }
16310 : :
16311 : : static bool
16312 : : rss_type_is_requested(struct mlx5_priv *priv,
16313 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16314 : : {
16315 : 0 : struct rte_flow_actions_template *at = priv->hw_ctrl_rx->rss[rss_type];
16316 : : unsigned int i;
16317 : :
16318 [ # # # # ]: 0 : for (i = 0; at->actions[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) {
16319 [ # # # # ]: 0 : if (at->actions[i].type == RTE_FLOW_ACTION_TYPE_RSS) {
16320 : 0 : const struct rte_flow_action_rss *rss = at->actions[i].conf;
16321 : 0 : uint64_t rss_types = rss->types;
16322 : :
16323 [ # # # # ]: 0 : if ((rss_types & priv->rss_conf.rss_hf) != rss_types)
16324 : : return false;
16325 : : }
16326 : : }
16327 : : return true;
16328 : : }
16329 : :
16330 : : static const struct rte_flow_item_eth *
16331 : : __get_eth_spec(const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern)
16332 : : {
16333 : 0 : switch (pattern) {
16334 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
16335 : : return &ctrl_rx_eth_promisc_spec;
16336 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
16337 : 0 : return &ctrl_rx_eth_mcast_spec;
16338 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
16339 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
16340 : 0 : return &ctrl_rx_eth_bcast_spec;
16341 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
16342 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
16343 : 0 : return &ctrl_rx_eth_ipv4_mcast_spec;
16344 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
16345 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
16346 : 0 : return &ctrl_rx_eth_ipv6_mcast_spec;
16347 : 0 : default:
16348 : : /* This case should not be reached. */
16349 : : MLX5_ASSERT(false);
16350 : 0 : return NULL;
16351 : : }
16352 : : }
16353 : :
16354 : : static int
16355 [ # # # # : 0 : __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
# # ]
16356 : : struct rte_flow_template_table *tbl,
16357 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
16358 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16359 : : {
16360 : : const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
16361 : : struct rte_flow_item items[5];
16362 : 0 : struct rte_flow_action actions[] = {
16363 : : { .type = RTE_FLOW_ACTION_TYPE_RSS },
16364 : : { .type = RTE_FLOW_ACTION_TYPE_END },
16365 : : };
16366 : 0 : struct mlx5_ctrl_flow_info flow_info = {
16367 : : .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
16368 : : };
16369 : :
16370 [ # # ]: 0 : if (!eth_spec)
16371 : : return -EINVAL;
16372 : : memset(items, 0, sizeof(items));
16373 : 0 : items[0] = (struct rte_flow_item){
16374 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
16375 : : .spec = eth_spec,
16376 : : };
16377 [ # # # ]: 0 : items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
16378 [ # # # ]: 0 : items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16379 : 0 : items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16380 : 0 : items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16381 : : /* Without VLAN filtering, only a single flow rule must be created. */
16382 : 0 : return flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false);
16383 : : }
16384 : :
16385 : : static int
16386 : 0 : __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
16387 : : struct rte_flow_template_table *tbl,
16388 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
16389 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16390 : : {
16391 [ # # # # : 0 : struct mlx5_priv *priv = dev->data->dev_private;
# # ]
16392 : : const struct rte_flow_item_eth *eth_spec = __get_eth_spec(pattern_type);
16393 : : struct rte_flow_item items[5];
16394 : 0 : struct rte_flow_action actions[] = {
16395 : : { .type = RTE_FLOW_ACTION_TYPE_RSS },
16396 : : { .type = RTE_FLOW_ACTION_TYPE_END },
16397 : : };
16398 : 0 : struct mlx5_ctrl_flow_info flow_info = {
16399 : : .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
16400 : : };
16401 : : unsigned int i;
16402 : :
16403 [ # # ]: 0 : if (!eth_spec)
16404 : : return -EINVAL;
16405 : : memset(items, 0, sizeof(items));
16406 : 0 : items[0] = (struct rte_flow_item){
16407 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
16408 : : .spec = eth_spec,
16409 : : };
16410 : : /* Optional VLAN for now will be VOID - will be filled later. */
16411 [ # # # ]: 0 : items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
16412 [ # # # ]: 0 : items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16413 : 0 : items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16414 : 0 : items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16415 : : /* Since VLAN filtering is done, create a single flow rule for each registered vid. */
16416 [ # # ]: 0 : for (i = 0; i < priv->vlan_filter_n; ++i) {
16417 : 0 : uint16_t vlan = priv->vlan_filter[i];
16418 : 0 : struct rte_flow_item_vlan vlan_spec = {
16419 [ # # ]: 0 : .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
16420 : : };
16421 : :
16422 : 0 : items[1].spec = &vlan_spec;
16423 [ # # ]: 0 : if (flow_hw_create_ctrl_flow(dev, dev,
16424 : : tbl, items, 0, actions, 0, &flow_info, false))
16425 : 0 : return -rte_errno;
16426 : : }
16427 : : return 0;
16428 : : }
16429 : :
16430 : : static int
16431 : 0 : __flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
16432 : : struct rte_flow_template_table *tbl,
16433 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
16434 : : const struct rte_ether_addr *addr)
16435 : : {
16436 : 0 : struct rte_flow_item_eth eth_spec = {
16437 : : .hdr.dst_addr = *addr,
16438 : : };
16439 : : struct rte_flow_item items[5];
16440 : 0 : struct rte_flow_action actions[] = {
16441 : : { .type = RTE_FLOW_ACTION_TYPE_RSS },
16442 : : { .type = RTE_FLOW_ACTION_TYPE_END },
16443 : : };
16444 [ # # # ]: 0 : struct mlx5_ctrl_flow_info flow_info = {
16445 : : .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
16446 : : .uc = {
16447 : : .dmac = *addr,
16448 : : },
16449 : : };
16450 : :
16451 : : memset(items, 0, sizeof(items));
16452 : 0 : items[0] = (struct rte_flow_item){
16453 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
16454 : : .spec = ð_spec,
16455 : : };
16456 [ # # # ]: 0 : items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VOID };
16457 [ # # # ]: 0 : items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16458 : 0 : items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16459 : 0 : items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16460 : :
16461 [ # # ]: 0 : if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
16462 : 0 : return -rte_errno;
16463 : :
16464 : : return 0;
16465 : : }
16466 : :
16467 : : static int
16468 : 0 : __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
16469 : : struct rte_flow_template_table *tbl,
16470 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16471 : : {
16472 : : unsigned int i;
16473 : : int ret;
16474 : :
16475 [ # # ]: 0 : for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
16476 [ # # ]: 0 : struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
16477 : :
16478 [ # # ]: 0 : if (rte_is_zero_ether_addr(mac))
16479 : 0 : continue;
16480 : :
16481 : 0 : ret = __flow_hw_ctrl_flows_unicast_create(dev, tbl, rss_type, mac);
16482 [ # # ]: 0 : if (ret < 0)
16483 : 0 : return ret;
16484 : : }
16485 : : return 0;
16486 : : }
16487 : :
16488 : : static int
16489 : 0 : __flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
16490 : : struct rte_flow_template_table *tbl,
16491 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
16492 : : const struct rte_ether_addr *addr,
16493 : : const uint16_t vid)
16494 : : {
16495 : 0 : struct rte_flow_item_eth eth_spec = {
16496 : : .hdr.dst_addr = *addr,
16497 : : };
16498 : 0 : struct rte_flow_item_vlan vlan_spec = {
16499 [ # # ]: 0 : .tci = rte_cpu_to_be_16(vid),
16500 : : };
16501 : : struct rte_flow_item items[5];
16502 : 0 : struct rte_flow_action actions[] = {
16503 : : { .type = RTE_FLOW_ACTION_TYPE_RSS },
16504 : : { .type = RTE_FLOW_ACTION_TYPE_END },
16505 : : };
16506 [ # # # ]: 0 : struct mlx5_ctrl_flow_info flow_info = {
16507 : : .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
16508 : : .uc = {
16509 : : .dmac = *addr,
16510 : : .vlan = vid,
16511 : : },
16512 : : };
16513 : :
16514 : : memset(items, 0, sizeof(items));
16515 : 0 : items[0] = (struct rte_flow_item){
16516 : : .type = RTE_FLOW_ITEM_TYPE_ETH,
16517 : : .spec = ð_spec,
16518 : : };
16519 [ # # # ]: 0 : items[1] = (struct rte_flow_item){
16520 : : .type = RTE_FLOW_ITEM_TYPE_VLAN,
16521 : : .spec = &vlan_spec,
16522 : : };
16523 [ # # # ]: 0 : items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
16524 : 0 : items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
16525 : 0 : items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
16526 : :
16527 [ # # ]: 0 : if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
16528 : 0 : return -rte_errno;
16529 : :
16530 : : return 0;
16531 : : }
16532 : :
16533 : : static int
16534 : 0 : __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
16535 : : struct rte_flow_template_table *tbl,
16536 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16537 : : {
16538 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
16539 : : unsigned int i;
16540 : : unsigned int j;
16541 : :
16542 [ # # ]: 0 : for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
16543 [ # # ]: 0 : struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
16544 : :
16545 [ # # ]: 0 : if (rte_is_zero_ether_addr(mac))
16546 : 0 : continue;
16547 : :
16548 [ # # ]: 0 : for (j = 0; j < priv->vlan_filter_n; ++j) {
16549 : 0 : uint16_t vlan = priv->vlan_filter[j];
16550 : : int ret;
16551 : :
16552 : 0 : ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tbl, rss_type,
16553 : : mac, vlan);
16554 [ # # ]: 0 : if (ret < 0)
16555 : 0 : return ret;
16556 : : }
16557 : : }
16558 : : return 0;
16559 : : }
16560 : :
16561 : : static int
16562 : 0 : __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
16563 : : struct rte_flow_template_table *tbl,
16564 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
16565 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
16566 : : {
16567 [ # # # # : 0 : switch (pattern_type) {
# ]
16568 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL:
16569 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST:
16570 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST:
16571 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST:
16572 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST:
16573 : 0 : return __flow_hw_ctrl_flows_single(dev, tbl, pattern_type, rss_type);
16574 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN:
16575 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN:
16576 : : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
16577 : 0 : return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
16578 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
16579 : 0 : return __flow_hw_ctrl_flows_unicast(dev, tbl, rss_type);
16580 : 0 : case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
16581 : 0 : return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, rss_type);
16582 : 0 : default:
16583 : : /* Should not reach here. */
16584 : : MLX5_ASSERT(false);
16585 : 0 : rte_errno = EINVAL;
16586 : 0 : return -EINVAL;
16587 : : }
16588 : : }
16589 : :
16590 : :
16591 : : int
16592 : 0 : mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
16593 : : {
16594 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
16595 : : struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
16596 : : unsigned int i;
16597 : : int j;
16598 : : int ret = 0;
16599 : :
16600 : : RTE_SET_USED(priv);
16601 : : RTE_SET_USED(flags);
16602 [ # # ]: 0 : if (!priv->dr_ctx) {
16603 : 0 : DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
16604 : : "HWS needs to be configured beforehand.",
16605 : : dev->data->port_id);
16606 : 0 : return 0;
16607 : : }
16608 [ # # ]: 0 : if (!priv->hw_ctrl_rx) {
16609 : 0 : DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
16610 : : dev->data->port_id);
16611 : 0 : rte_errno = EINVAL;
16612 : 0 : return -rte_errno;
16613 : : }
16614 : : hw_ctrl_rx = priv->hw_ctrl_rx;
16615 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX; ++i) {
16616 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type = i;
16617 : :
16618 [ # # ]: 0 : if (!eth_pattern_type_is_requested(eth_pattern_type, flags))
16619 : 0 : continue;
16620 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
16621 : 0 : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
16622 : : struct rte_flow_actions_template *at;
16623 : : struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[i][j];
16624 : 0 : const struct mlx5_flow_template_table_cfg cfg = {
16625 : : .attr = tmpls->attr,
16626 : : .external = 0,
16627 : : };
16628 : :
16629 [ # # ]: 0 : if (!hw_ctrl_rx->rss[rss_type]) {
16630 : 0 : at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
16631 [ # # ]: 0 : if (!at)
16632 : 0 : return -rte_errno;
16633 : 0 : hw_ctrl_rx->rss[rss_type] = at;
16634 : : } else {
16635 : 0 : at = hw_ctrl_rx->rss[rss_type];
16636 : : }
16637 [ # # ]: 0 : if (!rss_type_is_requested(priv, rss_type))
16638 : 0 : continue;
16639 [ # # ]: 0 : if (!tmpls->tbl) {
16640 : 0 : tmpls->tbl = flow_hw_table_create(dev, &cfg,
16641 : : &tmpls->pt, 1, &at, 1, NULL);
16642 [ # # ]: 0 : if (!tmpls->tbl) {
16643 : 0 : DRV_LOG(ERR, "port %u Failed to create template table "
16644 : : "for control flow rules. Unable to create "
16645 : : "control flow rules.",
16646 : : dev->data->port_id);
16647 : 0 : return -rte_errno;
16648 : : }
16649 : : }
16650 : :
16651 : 0 : ret = __flow_hw_ctrl_flows(dev, tmpls->tbl, eth_pattern_type, rss_type);
16652 [ # # ]: 0 : if (ret) {
16653 : 0 : DRV_LOG(ERR, "port %u Failed to create control flow rule.",
16654 : : dev->data->port_id);
16655 : 0 : return ret;
16656 : : }
16657 : : }
16658 : : }
16659 : : return 0;
16660 : : }
16661 : :
16662 : : static int
16663 : 0 : mlx5_flow_hw_ctrl_flow_single(struct rte_eth_dev *dev,
16664 : : const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
16665 : : const struct rte_ether_addr *addr,
16666 : : const uint16_t vlan)
16667 : : {
16668 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
16669 : : struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
16670 : : unsigned int j;
16671 : : int ret = 0;
16672 : :
16673 [ # # ]: 0 : if (!priv->dr_ctx) {
16674 : 0 : DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
16675 : : "HWS needs to be configured beforehand.",
16676 : : dev->data->port_id);
16677 : 0 : return 0;
16678 : : }
16679 [ # # ]: 0 : if (!priv->hw_ctrl_rx) {
16680 : 0 : DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
16681 : : dev->data->port_id);
16682 : 0 : rte_errno = EINVAL;
16683 : 0 : return -rte_errno;
16684 : : }
16685 : : hw_ctrl_rx = priv->hw_ctrl_rx;
16686 : :
16687 : : /* TODO: this part should be somehow refactored. It's common with common flow creation. */
16688 [ # # ]: 0 : for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
16689 : : const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
16690 : : const unsigned int pti = eth_pattern_type;
16691 : : struct rte_flow_actions_template *at;
16692 : : struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[pti][j];
16693 : 0 : const struct mlx5_flow_template_table_cfg cfg = {
16694 : : .attr = tmpls->attr,
16695 : : .external = 0,
16696 : : };
16697 : :
16698 [ # # ]: 0 : if (!hw_ctrl_rx->rss[rss_type]) {
16699 : 0 : at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
16700 [ # # ]: 0 : if (!at)
16701 : 0 : return -rte_errno;
16702 : 0 : hw_ctrl_rx->rss[rss_type] = at;
16703 : : } else {
16704 : 0 : at = hw_ctrl_rx->rss[rss_type];
16705 : : }
16706 [ # # ]: 0 : if (!rss_type_is_requested(priv, rss_type))
16707 : 0 : continue;
16708 [ # # ]: 0 : if (!tmpls->tbl) {
16709 : 0 : tmpls->tbl = flow_hw_table_create(dev, &cfg,
16710 : : &tmpls->pt, 1, &at, 1, NULL);
16711 [ # # ]: 0 : if (!tmpls->tbl) {
16712 : 0 : DRV_LOG(ERR, "port %u Failed to create template table "
16713 : : "for control flow rules. Unable to create "
16714 : : "control flow rules.",
16715 : : dev->data->port_id);
16716 : 0 : return -rte_errno;
16717 : : }
16718 : : }
16719 : :
16720 : : MLX5_ASSERT(eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC ||
16721 : : eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN);
16722 : :
16723 [ # # ]: 0 : if (eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC)
16724 : 0 : ret = __flow_hw_ctrl_flows_unicast_create(dev, tmpls->tbl, rss_type, addr);
16725 : : else
16726 : 0 : ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tmpls->tbl, rss_type,
16727 : : addr, vlan);
16728 [ # # ]: 0 : if (ret) {
16729 : 0 : DRV_LOG(ERR, "port %u Failed to create unicast control flow rule.",
16730 : : dev->data->port_id);
16731 : 0 : return ret;
16732 : : }
16733 : : }
16734 : :
16735 : : return 0;
16736 : : }
16737 : :
16738 : : int
16739 : 0 : mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
16740 : : const struct rte_ether_addr *addr)
16741 : : {
16742 : 0 : return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
16743 : : addr, 0);
16744 : : }
16745 : :
16746 : :
16747 : : int
16748 : 0 : mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
16749 : : const struct rte_ether_addr *addr)
16750 : : {
16751 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
16752 : : struct mlx5_ctrl_flow_entry *entry;
16753 : : struct mlx5_ctrl_flow_entry *tmp;
16754 : : int ret;
16755 : :
16756 : : /*
16757 : : * HWS does not have automatic RSS flow expansion,
16758 : : * so each variant of the control flow rule is a separate entry in the list.
16759 : : * In that case, the whole list must be traversed.
16760 : : */
16761 : 0 : entry = LIST_FIRST(&priv->hw_ctrl_flows);
16762 [ # # ]: 0 : while (entry != NULL) {
16763 : 0 : tmp = LIST_NEXT(entry, next);
16764 : :
16765 [ # # # # ]: 0 : if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
16766 : : !rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
16767 : : entry = tmp;
16768 : 0 : continue;
16769 : : }
16770 : :
16771 : 0 : ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
16772 [ # # ]: 0 : LIST_REMOVE(entry, next);
16773 : 0 : mlx5_free(entry);
16774 [ # # ]: 0 : if (ret)
16775 : 0 : return ret;
16776 : :
16777 : : entry = tmp;
16778 : : }
16779 : : return 0;
16780 : : }
16781 : :
16782 : : int
16783 : 0 : mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
16784 : : const struct rte_ether_addr *addr,
16785 : : const uint16_t vlan)
16786 : : {
16787 : 0 : return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
16788 : : addr, vlan);
16789 : : }
16790 : :
16791 : : int
16792 : 0 : mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
16793 : : const struct rte_ether_addr *addr,
16794 : : const uint16_t vlan)
16795 : : {
16796 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
16797 : : struct mlx5_ctrl_flow_entry *entry;
16798 : : struct mlx5_ctrl_flow_entry *tmp;
16799 : : int ret;
16800 : :
16801 : : /*
16802 : : * HWS does not have automatic RSS flow expansion,
16803 : : * so each variant of the control flow rule is a separate entry in the list.
16804 : : * In that case, the whole list must be traversed.
16805 : : */
16806 : 0 : entry = LIST_FIRST(&priv->hw_ctrl_flows);
16807 [ # # ]: 0 : while (entry != NULL) {
16808 : 0 : tmp = LIST_NEXT(entry, next);
16809 : :
16810 [ # # # # ]: 0 : if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
16811 : 0 : !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
16812 [ # # ]: 0 : vlan != entry->info.uc.vlan) {
16813 : : entry = tmp;
16814 : 0 : continue;
16815 : : }
16816 : :
16817 : 0 : ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
16818 [ # # ]: 0 : LIST_REMOVE(entry, next);
16819 : 0 : mlx5_free(entry);
16820 [ # # ]: 0 : if (ret)
16821 : 0 : return ret;
16822 : :
16823 : : entry = tmp;
16824 : : }
16825 : : return 0;
16826 : : }
16827 : :
16828 : : struct mlx5_ecpri_parser_profile *
16829 : 0 : mlx5_flow_hw_get_ecpri_parser_profile(void *dr_ctx)
16830 : : {
16831 : : uint16_t port_id;
16832 : : bool found = false;
16833 : : struct mlx5_priv *priv;
16834 : :
16835 [ # # ]: 0 : MLX5_ETH_FOREACH_DEV(port_id, NULL) {
16836 : 0 : priv = rte_eth_devices[port_id].data->dev_private;
16837 [ # # ]: 0 : if (priv->dr_ctx == dr_ctx) {
16838 : : found = true;
16839 : : break;
16840 : : }
16841 : : }
16842 [ # # ]: 0 : if (found)
16843 : 0 : return &priv->sh->ecpri_parser;
16844 : 0 : rte_errno = ENODEV;
16845 : 0 : return NULL;
16846 : : }
16847 : :
16848 : :
16849 : : static __rte_always_inline uint32_t
16850 : : mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain,
16851 : : bool fdb_unified_en)
16852 : : {
16853 : : uint32_t tbl_type;
16854 : :
16855 [ # # ]: 0 : if (domain->transfer)
16856 : : tbl_type = (fdb_unified_en ?
16857 : : (MLX5DR_ACTION_FLAG_HWS_FDB_RX |
16858 : : MLX5DR_ACTION_FLAG_HWS_FDB_TX |
16859 [ # # ]: 0 : MLX5DR_ACTION_FLAG_HWS_FDB_UNIFIED) :
16860 : : MLX5DR_ACTION_FLAG_HWS_FDB);
16861 [ # # ]: 0 : else if (domain->egress)
16862 : : tbl_type = MLX5DR_ACTION_FLAG_HWS_TX;
16863 [ # # ]: 0 : else if (domain->ingress)
16864 : : tbl_type = MLX5DR_ACTION_FLAG_HWS_RX;
16865 : : else
16866 : : tbl_type = UINT32_MAX;
16867 : : return tbl_type;
16868 : : }
16869 : :
16870 : : static struct mlx5_hw_encap_decap_action *
16871 : 0 : __mlx5_reformat_create(struct rte_eth_dev *dev,
16872 : : const struct rte_flow_action_raw_encap *encap_conf,
16873 : : const struct rte_flow_indir_action_conf *domain,
16874 : : enum mlx5dr_action_type type)
16875 : : {
16876 [ # # ]: 0 : struct mlx5_priv *priv = dev->data->dev_private;
16877 : : struct mlx5_hw_encap_decap_action *handle;
16878 : : struct mlx5dr_action_reformat_header hdr;
16879 : : uint32_t flags;
16880 : : bool unified_fdb = is_unified_fdb(priv);
16881 : :
16882 : : flags = mlx5_reformat_domain_to_tbl_type(domain, unified_fdb);
16883 : 0 : flags |= (uint32_t)MLX5DR_ACTION_FLAG_SHARED;
16884 [ # # ]: 0 : if (flags == UINT32_MAX) {
16885 : 0 : DRV_LOG(ERR, "Reformat: invalid indirect action configuration");
16886 : 0 : return NULL;
16887 : : }
16888 : : /* Allocate new list entry. */
16889 : 0 : handle = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*handle), 0, SOCKET_ID_ANY);
16890 [ # # ]: 0 : if (!handle) {
16891 : 0 : DRV_LOG(ERR, "Reformat: failed to allocate reformat entry");
16892 : 0 : return NULL;
16893 : : }
16894 : 0 : handle->action_type = type;
16895 [ # # ]: 0 : hdr.sz = encap_conf ? encap_conf->size : 0;
16896 [ # # ]: 0 : hdr.data = encap_conf ? encap_conf->data : NULL;
16897 : 0 : handle->action = mlx5dr_action_create_reformat(priv->dr_ctx,
16898 : : type, 1, &hdr, 0, flags);
16899 [ # # ]: 0 : if (!handle->action) {
16900 : 0 : DRV_LOG(ERR, "Reformat: failed to create reformat action");
16901 : 0 : mlx5_free(handle);
16902 : 0 : return NULL;
16903 : : }
16904 : : return handle;
16905 : : }
16906 : :
16907 : : /**
16908 : : * Create mlx5 reformat action.
16909 : : *
16910 : : * @param[in] dev
16911 : : * Pointer to rte_eth_dev structure.
16912 : : * @param[in] conf
16913 : : * Pointer to the indirect action parameters.
16914 : : * @param[in] encap_action
16915 : : * Pointer to the raw_encap action configuration.
16916 : : * @param[in] decap_action
16917 : : * Pointer to the raw_decap action configuration.
16918 : : * @param[out] error
16919 : : * Pointer to error structure.
16920 : : *
16921 : : * @return
16922 : : * A valid shared action handle in case of success, NULL otherwise and
16923 : : * rte_errno is set.
16924 : : */
16925 : : struct mlx5_hw_encap_decap_action*
16926 : 0 : mlx5_reformat_action_create(struct rte_eth_dev *dev,
16927 : : const struct rte_flow_indir_action_conf *conf,
16928 : : const struct rte_flow_action *encap_action,
16929 : : const struct rte_flow_action *decap_action,
16930 : : struct rte_flow_error *error)
16931 : : {
16932 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
16933 : : struct mlx5_hw_encap_decap_action *handle;
16934 : : const struct rte_flow_action_raw_encap *encap = NULL;
16935 : : const struct rte_flow_action_raw_decap *decap = NULL;
16936 : : enum mlx5dr_action_type type = MLX5DR_ACTION_TYP_LAST;
16937 : :
16938 : : MLX5_ASSERT(!encap_action || encap_action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP);
16939 : : MLX5_ASSERT(!decap_action || decap_action->type == RTE_FLOW_ACTION_TYPE_RAW_DECAP);
16940 [ # # ]: 0 : if (priv->sh->config.dv_flow_en != 2) {
16941 : 0 : rte_flow_error_set(error, ENOTSUP,
16942 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16943 : : "Reformat: hardware does not support");
16944 : 0 : return NULL;
16945 : : }
16946 [ # # # # ]: 0 : if (!conf || (conf->transfer + conf->egress + conf->ingress != 1)) {
16947 : 0 : rte_flow_error_set(error, EINVAL,
16948 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16949 : : "Reformat: domain should be specified");
16950 : 0 : return NULL;
16951 : : }
16952 [ # # # # : 0 : if ((encap_action && !encap_action->conf) || (decap_action && !decap_action->conf)) {
# # # # ]
16953 : 0 : rte_flow_error_set(error, EINVAL,
16954 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16955 : : "Reformat: missed action configuration");
16956 : 0 : return NULL;
16957 : : }
16958 [ # # ]: 0 : if (encap_action && !decap_action) {
16959 : 0 : encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
16960 [ # # ]: 0 : if (!encap->size || encap->size > MLX5_ENCAP_MAX_LEN ||
16961 : : encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16962 : 0 : rte_flow_error_set(error, EINVAL,
16963 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16964 : : "Reformat: Invalid encap length");
16965 : 0 : return NULL;
16966 : : }
16967 : : type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
16968 [ # # ]: 0 : } else if (decap_action && !encap_action) {
16969 : 0 : decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
16970 [ # # ]: 0 : if (!decap->size || decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16971 : 0 : rte_flow_error_set(error, EINVAL,
16972 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16973 : : "Reformat: Invalid decap length");
16974 : 0 : return NULL;
16975 : : }
16976 : : type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2;
16977 [ # # ]: 0 : } else if (encap_action && decap_action) {
16978 : 0 : decap = (const struct rte_flow_action_raw_decap *)decap_action->conf;
16979 : 0 : encap = (const struct rte_flow_action_raw_encap *)encap_action->conf;
16980 [ # # ]: 0 : if (decap->size < MLX5_ENCAPSULATION_DECISION_SIZE &&
16981 [ # # # # ]: 0 : encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
16982 : : encap->size <= MLX5_ENCAP_MAX_LEN) {
16983 : : type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3;
16984 [ # # ]: 0 : } else if (decap->size >= MLX5_ENCAPSULATION_DECISION_SIZE &&
16985 [ # # ]: 0 : encap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
16986 : : type = MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2;
16987 : : } else {
16988 : 0 : rte_flow_error_set(error, EINVAL,
16989 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16990 : : "Reformat: Invalid decap & encap length");
16991 : 0 : return NULL;
16992 : : }
16993 [ # # ]: 0 : } else if (!encap_action && !decap_action) {
16994 : 0 : rte_flow_error_set(error, EINVAL,
16995 : : RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
16996 : : "Reformat: Invalid decap & encap configurations");
16997 : 0 : return NULL;
16998 : : }
16999 [ # # ]: 0 : if (!priv->dr_ctx) {
17000 : 0 : rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
17001 : : encap_action, "Reformat: HWS not supported");
17002 : 0 : return NULL;
17003 : : }
17004 : 0 : handle = __mlx5_reformat_create(dev, encap, conf, type);
17005 [ # # ]: 0 : if (!handle) {
17006 : 0 : rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, encap_action,
17007 : : "Reformat: failed to create indirect action");
17008 : 0 : return NULL;
17009 : : }
17010 : : return handle;
17011 : : }
17012 : :
17013 : : /**
17014 : : * Destroy the indirect reformat action.
17015 : : * Release action related resources on the NIC and the memory.
17016 : : * Lock free, (mutex should be acquired by caller).
17017 : : *
17018 : : * @param[in] dev
17019 : : * Pointer to the Ethernet device structure.
17020 : : * @param[in] handle
17021 : : * The indirect action list handle to be removed.
17022 : : * @param[out] error
17023 : : * Perform verbose error reporting if not NULL. Initialized in case of
17024 : : * error only.
17025 : : *
17026 : : * @return
17027 : : * 0 on success, otherwise negative errno value.
17028 : : */
17029 : : int
17030 : 0 : mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
17031 : : struct rte_flow_action_list_handle *handle,
17032 : : struct rte_flow_error *error)
17033 : : {
17034 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
17035 : : struct mlx5_hw_encap_decap_action *action;
17036 : :
17037 : : action = (struct mlx5_hw_encap_decap_action *)handle;
17038 [ # # # # ]: 0 : if (!priv->dr_ctx || !action)
17039 : 0 : return rte_flow_error_set(error, ENOTSUP,
17040 : : RTE_FLOW_ERROR_TYPE_ACTION, handle,
17041 : : "Reformat: invalid action handle");
17042 : 0 : mlx5dr_action_destroy(action->action);
17043 : 0 : mlx5_free(handle);
17044 : 0 : return 0;
17045 : : }
17046 : :
17047 : : static bool
17048 : 0 : flow_hw_is_item_masked(const struct rte_flow_item *item)
17049 : : {
17050 : : const uint8_t *byte;
17051 : : int size;
17052 : : int i;
17053 : :
17054 [ # # ]: 0 : if (item->mask == NULL)
17055 : : return false;
17056 : :
17057 [ # # ]: 0 : switch ((int)item->type) {
17058 : : case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
17059 : : size = sizeof(struct rte_flow_item_tag);
17060 : : break;
17061 : : case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
17062 : : size = sizeof(struct mlx5_rte_flow_item_sq);
17063 : : break;
17064 : 0 : default:
17065 : 0 : size = rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_MASK, NULL, 0, item, NULL);
17066 : : /*
17067 : : * Pattern template items are passed to this function.
17068 : : * These items were already validated, so error is not expected.
17069 : : * Also, if mask is NULL, then spec size is bigger than 0 always.
17070 : : */
17071 : : MLX5_ASSERT(size > 0);
17072 : : }
17073 : :
17074 : 0 : byte = (const uint8_t *)item->mask;
17075 [ # # ]: 0 : for (i = 0; i < size; ++i)
17076 [ # # ]: 0 : if (byte[i])
17077 : : return true;
17078 : :
17079 : : return false;
17080 : : }
17081 : :
17082 : : static int
17083 : 0 : flow_hw_validate_rule_pattern(struct rte_eth_dev *dev,
17084 : : const struct rte_flow_template_table *table,
17085 : : const uint8_t pattern_template_idx,
17086 : : const struct rte_flow_item items[],
17087 : : struct rte_flow_error *error)
17088 : : {
17089 : : const struct rte_flow_pattern_template *pt;
17090 : : const struct rte_flow_item *pt_item;
17091 : :
17092 [ # # ]: 0 : if (pattern_template_idx >= table->nb_item_templates)
17093 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17094 : : "Pattern template index out of range");
17095 : :
17096 : 0 : pt = table->its[pattern_template_idx];
17097 : 0 : pt_item = pt->items;
17098 : :
17099 : : /* If any item was prepended, skip it. */
17100 [ # # ]: 0 : if (pt->implicit_port || pt->implicit_tag)
17101 : 0 : pt_item++;
17102 : :
17103 [ # # ]: 0 : for (; pt_item->type != RTE_FLOW_ITEM_TYPE_END; pt_item++, items++) {
17104 [ # # ]: 0 : if (pt_item->type != items->type)
17105 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
17106 : : items, "Item type does not match the template");
17107 : :
17108 : : /*
17109 : : * Assumptions:
17110 : : * - Currently mlx5dr layer contains info on which fields in masks are supported.
17111 : : * - This info is not exposed to PMD directly.
17112 : : * - Because of that, it is assumed that since pattern template is correct,
17113 : : * then, items' masks in pattern template have nonzero values only in
17114 : : * supported fields.
17115 : : * This is known, because a temporary mlx5dr matcher is created during pattern
17116 : : * template creation to validate the template.
17117 : : * - As a result, it is safe to look for nonzero bytes in mask to determine if
17118 : : * item spec is needed in a flow rule.
17119 : : */
17120 [ # # ]: 0 : if (!flow_hw_is_item_masked(pt_item))
17121 : 0 : continue;
17122 : :
17123 [ # # ]: 0 : if (items->spec == NULL)
17124 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
17125 : : items, "Item spec is required");
17126 : :
17127 [ # # # # ]: 0 : switch (items->type) {
17128 : : const struct rte_flow_item_ethdev *ethdev;
17129 : : const struct rte_flow_item_tx_queue *tx_queue;
17130 : : const struct rte_flow_item_conntrack *spec;
17131 : : struct mlx5_txq_ctrl *txq;
17132 : :
17133 : 0 : case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
17134 : : ethdev = items->spec;
17135 [ # # ]: 0 : if (flow_hw_validate_target_port_id(dev, ethdev->port_id)) {
17136 : 0 : return rte_flow_error_set(error, EINVAL,
17137 : : RTE_FLOW_ERROR_TYPE_ITEM_SPEC, items,
17138 : : "Invalid port");
17139 : : }
17140 : : break;
17141 : 0 : case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
17142 : : tx_queue = items->spec;
17143 [ # # # # ]: 0 : if (mlx5_is_external_txq(dev, tx_queue->tx_queue))
17144 : 0 : continue;
17145 : 0 : txq = mlx5_txq_get(dev, tx_queue->tx_queue);
17146 [ # # ]: 0 : if (!txq)
17147 : 0 : return rte_flow_error_set(error, EINVAL,
17148 : : RTE_FLOW_ERROR_TYPE_ITEM_SPEC, items,
17149 : : "Invalid Tx queue");
17150 : 0 : mlx5_txq_release(dev, tx_queue->tx_queue);
17151 : 0 : break;
17152 : 0 : case RTE_FLOW_ITEM_TYPE_CONNTRACK:
17153 : : spec = items->spec;
17154 [ # # ]: 0 : if (spec->flags & ~MLX5_FLOW_CONNTRACK_PKT_STATE_ALL)
17155 : 0 : return rte_flow_error_set(error, EINVAL,
17156 : : RTE_FLOW_ERROR_TYPE_ITEM,
17157 : : NULL,
17158 : : "Invalid CT item flags");
17159 : : break;
17160 : : default:
17161 : : break;
17162 : : }
17163 : : }
17164 : :
17165 : : return 0;
17166 : : }
17167 : :
17168 : : static bool
17169 : 0 : flow_hw_valid_indirect_action_type(const struct rte_flow_action *user_action,
17170 : : const enum rte_flow_action_type expected_type)
17171 : : {
17172 : 0 : uint32_t user_indirect_type = MLX5_INDIRECT_ACTION_TYPE_GET(user_action->conf);
17173 : : uint32_t expected_indirect_type;
17174 : :
17175 [ # # # # : 0 : switch ((int)expected_type) {
# # # ]
17176 : : case RTE_FLOW_ACTION_TYPE_RSS:
17177 : : case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
17178 : : expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_RSS;
17179 : : break;
17180 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
17181 : : case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
17182 : : expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_COUNT;
17183 : 0 : break;
17184 : 0 : case RTE_FLOW_ACTION_TYPE_AGE:
17185 : : expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_AGE;
17186 : 0 : break;
17187 : 0 : case RTE_FLOW_ACTION_TYPE_CONNTRACK:
17188 : : expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
17189 : 0 : break;
17190 : 0 : case RTE_FLOW_ACTION_TYPE_METER_MARK:
17191 : : case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
17192 : : expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_METER_MARK;
17193 : 0 : break;
17194 : 0 : case RTE_FLOW_ACTION_TYPE_QUOTA:
17195 : : expected_indirect_type = MLX5_INDIRECT_ACTION_TYPE_QUOTA;
17196 : 0 : break;
17197 : : default:
17198 : : return false;
17199 : : }
17200 : :
17201 : 0 : return user_indirect_type == expected_indirect_type;
17202 : : }
17203 : :
17204 : : static int
17205 : 0 : flow_hw_validate_rule_actions(struct rte_eth_dev *dev,
17206 : : const struct rte_flow_template_table *table,
17207 : : const uint8_t actions_template_idx,
17208 : : const struct rte_flow_action actions[],
17209 : : struct rte_flow_error *error)
17210 : : {
17211 : : const struct rte_flow_actions_template *at;
17212 : : const struct mlx5_hw_actions *hw_acts;
17213 : : const struct mlx5_action_construct_data *act_data;
17214 : : unsigned int idx;
17215 : :
17216 [ # # ]: 0 : if (actions_template_idx >= table->nb_action_templates)
17217 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17218 : : "Actions template index out of range");
17219 : :
17220 : 0 : at = table->ats[actions_template_idx].action_template;
17221 : : hw_acts = &table->ats[actions_template_idx].acts;
17222 : :
17223 [ # # ]: 0 : for (idx = 0; actions[idx].type != RTE_FLOW_ACTION_TYPE_END; ++idx) {
17224 : : const struct rte_flow_action *user_action = &actions[idx];
17225 : 0 : const struct rte_flow_action *tmpl_action = &at->orig_actions[idx];
17226 : :
17227 [ # # ]: 0 : if (user_action->type != tmpl_action->type)
17228 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
17229 : : user_action,
17230 : : "Action type does not match type specified in "
17231 : : "actions template");
17232 : : }
17233 : :
17234 : : /*
17235 : : * Only go through unmasked actions and check if configuration is provided.
17236 : : * Configuration of masked actions is ignored.
17237 : : */
17238 [ # # ]: 0 : LIST_FOREACH(act_data, &hw_acts->act_list, next) {
17239 : : const struct rte_flow_action *user_action;
17240 : :
17241 : 0 : user_action = &actions[act_data->action_src];
17242 : :
17243 : : /* Skip actions which do not require conf. */
17244 [ # # ]: 0 : switch ((int)act_data->type) {
17245 : 0 : case RTE_FLOW_ACTION_TYPE_COUNT:
17246 : : case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
17247 : : case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK:
17248 : : case MLX5_RTE_FLOW_ACTION_TYPE_RSS:
17249 : 0 : continue;
17250 : : default:
17251 : : break;
17252 : : }
17253 : :
17254 [ # # ]: 0 : if (user_action->conf == NULL)
17255 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
17256 : : user_action,
17257 : : "Action requires configuration");
17258 : :
17259 [ # # # # : 0 : switch ((int)user_action->type) {
# # ]
17260 : : enum rte_flow_action_type expected_type;
17261 : : const struct rte_flow_action_ethdev *ethdev;
17262 : : const struct rte_flow_action_modify_field *mf;
17263 : :
17264 : 0 : case RTE_FLOW_ACTION_TYPE_INDIRECT:
17265 : 0 : expected_type = act_data->indirect.expected_type;
17266 [ # # ]: 0 : if (!flow_hw_valid_indirect_action_type(user_action, expected_type))
17267 : 0 : return rte_flow_error_set(error, EINVAL,
17268 : : RTE_FLOW_ERROR_TYPE_ACTION_CONF,
17269 : : user_action,
17270 : : "Indirect action type does not match "
17271 : : "the type specified in the mask");
17272 : : break;
17273 : 0 : case RTE_FLOW_ACTION_TYPE_QUEUE:
17274 [ # # ]: 0 : if (mlx5_flow_validate_target_queue(dev, user_action, error))
17275 : 0 : return -rte_errno;
17276 : : break;
17277 : 0 : case RTE_FLOW_ACTION_TYPE_RSS:
17278 [ # # ]: 0 : if (mlx5_validate_action_rss(dev, user_action, error))
17279 : 0 : return -rte_errno;
17280 : : break;
17281 : 0 : case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
17282 : : /* TODO: Compare other fields if needed. */
17283 : : mf = user_action->conf;
17284 [ # # ]: 0 : if (mf->operation != act_data->modify_header.action.operation ||
17285 [ # # ]: 0 : mf->src.field != act_data->modify_header.action.src.field ||
17286 [ # # ]: 0 : mf->dst.field != act_data->modify_header.action.dst.field ||
17287 [ # # ]: 0 : mf->width != act_data->modify_header.action.width)
17288 : 0 : return rte_flow_error_set(error, EINVAL,
17289 : : RTE_FLOW_ERROR_TYPE_ACTION_CONF,
17290 : : user_action,
17291 : : "Modify field configuration does not "
17292 : : "match configuration from actions "
17293 : : "template");
17294 : : break;
17295 : 0 : case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
17296 : : ethdev = user_action->conf;
17297 [ # # ]: 0 : if (flow_hw_validate_target_port_id(dev, ethdev->port_id)) {
17298 : 0 : return rte_flow_error_set(error, EINVAL,
17299 : : RTE_FLOW_ERROR_TYPE_ACTION_CONF,
17300 : : user_action, "Invalid port");
17301 : : }
17302 : : break;
17303 : : default:
17304 : : break;
17305 : : }
17306 : : }
17307 : :
17308 : : return 0;
17309 : : }
17310 : :
17311 : : static int
17312 : 0 : flow_hw_async_op_validate(struct rte_eth_dev *dev,
17313 : : const uint32_t queue,
17314 : : const struct rte_flow_template_table *table,
17315 : : struct rte_flow_error *error)
17316 : : {
17317 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
17318 : :
17319 : : MLX5_ASSERT(table != NULL);
17320 : :
17321 [ # # # # ]: 0 : if (table->cfg.external && queue >= priv->hw_attr->nb_queue)
17322 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
17323 : : "Incorrect queue");
17324 : :
17325 : : return 0;
17326 : : }
17327 : :
17328 : : /**
17329 : : * Validate user input for rte_flow_async_create() implementation.
17330 : : *
17331 : : * If RTE_PMD_MLX5_DEBUG macro is not defined, this function is a no-op.
17332 : : *
17333 : : * @param[in] dev
17334 : : * Pointer to the rte_eth_dev structure.
17335 : : * @param[in] queue
17336 : : * The queue to create the flow.
17337 : : * @param[in] table
17338 : : * Pointer to template table.
17339 : : * @param[in] rule_index
17340 : : * The item pattern flow follows from the table.
17341 : : * @param[in] items
17342 : : * Items with flow spec value.
17343 : : * @param[in] pattern_template_index
17344 : : * The item pattern flow follows from the table.
17345 : : * @param[in] actions
17346 : : * Action with flow spec value.
17347 : : * @param[in] action_template_index
17348 : : * The action pattern flow follows from the table.
17349 : : * @param[out] error
17350 : : * Pointer to error structure.
17351 : : *
17352 : : * @return
17353 : : * 0 if user input is valid.
17354 : : * Negative errno otherwise, rte_errno and error struct is populated.
17355 : : */
17356 : : static int
17357 : 0 : flow_hw_async_create_validate(struct rte_eth_dev *dev,
17358 : : const uint32_t queue,
17359 : : const struct rte_flow_template_table *table,
17360 : : enum rte_flow_table_insertion_type insertion_type,
17361 : : uint32_t rule_index,
17362 : : const struct rte_flow_item items[],
17363 : : const uint8_t pattern_template_index,
17364 : : const struct rte_flow_action actions[],
17365 : : const uint8_t action_template_index,
17366 : : struct rte_flow_error *error)
17367 : : {
17368 [ # # ]: 0 : if (flow_hw_async_op_validate(dev, queue, table, error))
17369 : 0 : return -rte_errno;
17370 : :
17371 [ # # ]: 0 : if (insertion_type != table->cfg.attr.insertion_type)
17372 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17373 : : NULL, "Flow rule insertion type mismatch with table configuration");
17374 : :
17375 [ # # ]: 0 : if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_PATTERN)
17376 [ # # ]: 0 : if (rule_index >= table->cfg.attr.nb_flows)
17377 : 0 : return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17378 : : NULL, "Flow rule index exceeds table size");
17379 : :
17380 [ # # ]: 0 : if (table->cfg.attr.insertion_type != RTE_FLOW_TABLE_INSERTION_TYPE_INDEX)
17381 [ # # ]: 0 : if (flow_hw_validate_rule_pattern(dev, table, pattern_template_index, items, error))
17382 : 0 : return -rte_errno;
17383 : :
17384 [ # # ]: 0 : if (flow_hw_validate_rule_actions(dev, table, action_template_index, actions, error))
17385 : 0 : return -rte_errno;
17386 : :
17387 : : return 0;
17388 : : }
17389 : :
17390 : : /**
17391 : : * Validate user input for rte_flow_async_update() implementation.
17392 : : *
17393 : : * If RTE_PMD_MLX5_DEBUG macro is not defined, this function is a no-op.
17394 : : *
17395 : : * @param[in] dev
17396 : : * Pointer to the rte_eth_dev structure.
17397 : : * @param[in] queue
17398 : : * The queue to create the flow.
17399 : : * @param[in] flow
17400 : : * Flow rule to be updated.
17401 : : * @param[in] actions
17402 : : * Action with flow spec value.
17403 : : * @param[in] action_template_index
17404 : : * The action pattern flow follows from the table.
17405 : : * @param[out] error
17406 : : * Pointer to error structure.
17407 : : *
17408 : : * @return
17409 : : * 0 if user input is valid.
17410 : : * Negative errno otherwise, rte_errno and error struct is set.
17411 : : */
17412 : : static int
17413 : 0 : flow_hw_async_update_validate(struct rte_eth_dev *dev,
17414 : : const uint32_t queue,
17415 : : const struct rte_flow_hw *flow,
17416 : : const struct rte_flow_action actions[],
17417 : : const uint8_t action_template_index,
17418 : : struct rte_flow_error *error)
17419 : : {
17420 [ # # ]: 0 : if (flow_hw_async_op_validate(dev, queue, flow->table, error))
17421 : 0 : return -rte_errno;
17422 : :
17423 [ # # ]: 0 : if (flow_hw_validate_rule_actions(dev, flow->table, action_template_index, actions, error))
17424 : 0 : return -rte_errno;
17425 : :
17426 : : return 0;
17427 : : }
17428 : :
17429 : : /**
17430 : : * Validate user input for rte_flow_async_destroy() implementation.
17431 : : *
17432 : : * If RTE_PMD_MLX5_DEBUG macro is not defined, this function is a no-op.
17433 : : *
17434 : : * @param[in] dev
17435 : : * Pointer to the rte_eth_dev structure.
17436 : : * @param[in] queue
17437 : : * The queue to create the flow.
17438 : : * @param[in] flow
17439 : : * Flow rule to be destroyed.
17440 : : * @param[out] error
17441 : : * Pointer to error structure.
17442 : : *
17443 : : * @return
17444 : : * 0 if user input is valid.
17445 : : * Negative errno otherwise, rte_errno and error struct is set.
17446 : : */
17447 : : static int
17448 : : flow_hw_async_destroy_validate(struct rte_eth_dev *dev,
17449 : : const uint32_t queue,
17450 : : const struct rte_flow_hw *flow,
17451 : : struct rte_flow_error *error)
17452 : : {
17453 : : if (flow_hw_async_op_validate(dev, queue, flow->table, error))
17454 : : return -rte_errno;
17455 : :
17456 : : return 0;
17457 : : }
17458 : :
/*
 * Fast-path async flow API callback table for the HWS (dv_flow_en=2) engine.
 * Registered so that rte_flow_async_* fast-path calls dispatch directly to
 * the flow_hw_* implementations without going through the generic ops lookup.
 */
static struct rte_flow_fp_ops mlx5_flow_hw_fp_ops = {
	/* Rule insertion/removal on async queues. */
	.async_create = flow_hw_async_flow_create,
	.async_create_by_index = flow_hw_async_flow_create_by_index,
	.async_create_by_index_with_pattern = flow_hw_async_flow_create_by_index_with_pattern,
	.async_actions_update = flow_hw_async_flow_update,
	.async_destroy = flow_hw_async_flow_destroy,
	/* Queue doorbell and completion polling. */
	.push = flow_hw_push,
	.pull = flow_hw_pull,
	/* Indirect action handle management. */
	.async_action_handle_create = flow_hw_action_handle_create,
	.async_action_handle_destroy = flow_hw_action_handle_destroy,
	.async_action_handle_update = flow_hw_action_handle_update,
	.async_action_handle_query = flow_hw_action_handle_query,
	.async_action_handle_query_update = flow_hw_async_action_handle_query_update,
	/* Indirect action list handle management. */
	.async_action_list_handle_create = flow_hw_async_action_list_handle_create,
	.async_action_list_handle_destroy = flow_hw_async_action_list_handle_destroy,
	.async_action_list_handle_query_update =
		flow_hw_async_action_list_handle_query_update,
};
17477 : :
17478 : : #endif
|