Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright 2015 6WIND S.A.
3 : : * Copyright 2020 Mellanox Technologies, Ltd
4 : : */
5 : :
6 : : #include <stddef.h>
7 : : #include <unistd.h>
8 : : #include <string.h>
9 : : #include <stdint.h>
10 : : #include <stdlib.h>
11 : : #include <errno.h>
12 : : #include <net/if.h>
13 : : #include <linux/rtnetlink.h>
14 : : #include <linux/sockios.h>
15 : : #include <linux/ethtool.h>
16 : : #include <fcntl.h>
17 : :
18 : : #include <rte_malloc.h>
19 : : #include <ethdev_driver.h>
20 : : #include <ethdev_pci.h>
21 : : #include <rte_pci.h>
22 : : #include <bus_driver.h>
23 : : #include <bus_pci_driver.h>
24 : : #include <bus_auxiliary_driver.h>
25 : : #include <rte_common.h>
26 : : #include <rte_kvargs.h>
27 : : #include <rte_rwlock.h>
28 : : #include <rte_spinlock.h>
29 : : #include <rte_string_fns.h>
30 : : #include <rte_alarm.h>
31 : : #include <rte_eal_paging.h>
32 : :
33 : : #include <mlx5_glue.h>
34 : : #include <mlx5_devx_cmds.h>
35 : : #include <mlx5_common.h>
36 : : #include <mlx5_common_mp.h>
37 : : #include <mlx5_common_mr.h>
38 : : #include <mlx5_malloc.h>
39 : :
40 : : #include "mlx5_defs.h"
41 : : #include "mlx5.h"
42 : : #include "mlx5_common_os.h"
43 : : #include "mlx5_utils.h"
44 : : #include "mlx5_rxtx.h"
45 : : #include "mlx5_rx.h"
46 : : #include "mlx5_tx.h"
47 : : #include "mlx5_autoconf.h"
48 : : #include "mlx5_flow.h"
49 : : #include "rte_pmd_mlx5.h"
50 : : #include "mlx5_verbs.h"
51 : : #include "mlx5_nl.h"
52 : : #include "mlx5_devx.h"
53 : :
54 : : #ifndef HAVE_IBV_MLX5_MOD_MPW
55 : : #define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
56 : : #define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
57 : : #endif
58 : :
59 : : #ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
60 : : #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
61 : : #endif
62 : :
63 : : static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
64 : :
65 : : /* Spinlock for mlx5_shared_data allocation. */
66 : : static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
67 : :
68 : : /* Process local data for secondary processes. */
69 : : static struct mlx5_local_data mlx5_local_data;
70 : :
71 : : /* rte flow indexed pool configuration. */
72 : : static const struct mlx5_indexed_pool_config default_icfg[] = {
73 : : {
74 : : .size = sizeof(struct rte_flow),
75 : : .trunk_size = 64,
76 : : .need_lock = 1,
77 : : .release_mem_en = 0,
78 : : .malloc = mlx5_malloc,
79 : : .free = mlx5_free,
80 : : .per_core_cache = 0,
81 : : .type = "ctl_flow_ipool",
82 : : },
83 : : {
84 : : .size = sizeof(struct rte_flow),
85 : : .trunk_size = 64,
86 : : .grow_trunk = 3,
87 : : .grow_shift = 2,
88 : : .need_lock = 1,
89 : : .release_mem_en = 0,
90 : : .malloc = mlx5_malloc,
91 : : .free = mlx5_free,
92 : : .per_core_cache = 1 << 14,
93 : : .type = "rte_flow_ipool",
94 : : },
95 : : {
96 : : .size = sizeof(struct rte_flow),
97 : : .trunk_size = 64,
98 : : .grow_trunk = 3,
99 : : .grow_shift = 2,
100 : : .need_lock = 1,
101 : : .release_mem_en = 0,
102 : : .malloc = mlx5_malloc,
103 : : .free = mlx5_free,
104 : : .per_core_cache = 0,
105 : : .type = "mcp_flow_ipool",
106 : : },
107 : : };
108 : :
109 : : /**
110 : : * Set the completion channel file descriptor interrupt as non-blocking.
111 : : *
112 : : * @param[in] fd
113 : : * The completion channel file descriptor
114 : : * (representing the interrupt) used in this
115 : : * channel, which is the descriptor to be
116 : : * set as non-blocking.
117 : : *
118 : : * @return
119 : : * 0 on successfully setting the fd to non-blocking, non-zero otherwise.
120 : : */
121 : : int
122 : 0 : mlx5_os_set_nonblock_channel_fd(int fd)
123 : : {
124 : : int flags;
125 : :
126 : 0 : flags = fcntl(fd, F_GETFL);
127 : 0 : return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
128 : : }
129 : :
130 : : /**
131 : : * Get mlx5 device attributes. The glue function query_device_ex() is called
132 : : * with an out parameter of type 'struct ibv_device_attr_ex *', and the mlx5
133 : : * device attributes are then filled in from that out parameter.
134 : : *
135 : : * @param sh
136 : : * Pointer to shared device context.
137 : : *
138 : : * @return
139 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
140 : : */
141 : : int
142 : 0 : mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
143 : : {
144 : : int err;
145 : 0 : struct mlx5_common_device *cdev = sh->cdev;
146 : 0 : struct mlx5_hca_attr *hca_attr = &cdev->config.hca_attr;
147 : 0 : struct ibv_device_attr_ex attr_ex = { .comp_mask = 0 };
148 : 0 : struct mlx5dv_context dv_attr = { .comp_mask = 0 };
149 : :
150 : 0 : err = mlx5_glue->query_device_ex(cdev->ctx, NULL, &attr_ex);
151 [ # # ]: 0 : if (err) {
152 : 0 : rte_errno = errno;
153 : 0 : return -rte_errno;
154 : : }
155 : : #ifdef HAVE_IBV_MLX5_MOD_SWP
156 : 0 : dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
157 : : #endif
158 : : #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
159 : 0 : dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
160 : : #endif
161 : : #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
162 : 0 : dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
163 : : #endif
164 : : #ifdef HAVE_IBV_DEVICE_ATTR_ESW_MGR_REG_C0
165 : : dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_REG_C0;
166 : : #endif
167 : 0 : err = mlx5_glue->dv_query_device(cdev->ctx, &dv_attr);
168 [ # # ]: 0 : if (err) {
169 : 0 : rte_errno = errno;
170 : 0 : return -rte_errno;
171 : : }
172 : 0 : memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
173 [ # # ]: 0 : if (mlx5_dev_is_pci(cdev->dev))
174 : 0 : sh->dev_cap.vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(cdev->dev));
175 : : else
176 : 0 : sh->dev_cap.sf = 1;
177 : 0 : sh->dev_cap.max_qp_wr = attr_ex.orig_attr.max_qp_wr;
178 : 0 : sh->dev_cap.max_sge = attr_ex.orig_attr.max_sge;
179 : 0 : sh->dev_cap.max_cq = attr_ex.orig_attr.max_cq;
180 : 0 : sh->dev_cap.max_qp = attr_ex.orig_attr.max_qp;
181 : : #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
182 : 0 : sh->dev_cap.dest_tir = 1;
183 : : #endif
184 : : #if defined(HAVE_IBV_FLOW_DV_SUPPORT) && defined(HAVE_MLX5DV_DR)
185 : 0 : DRV_LOG(DEBUG, "DV flow is supported.");
186 : 0 : sh->dev_cap.dv_flow_en = 1;
187 : : #endif
188 : : #ifdef HAVE_MLX5DV_DR_ESWITCH
189 [ # # # # ]: 0 : if (hca_attr->eswitch_manager && sh->dev_cap.dv_flow_en && sh->esw_mode)
190 : 0 : sh->dev_cap.dv_esw_en = 1;
191 : : #endif
192 : : /*
193 : : * Multi-packet send is supported by ConnectX-4 Lx PF as well
194 : : * as all ConnectX-5 devices.
195 : : */
196 [ # # ]: 0 : if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
197 [ # # ]: 0 : if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
198 : 0 : DRV_LOG(DEBUG, "Enhanced MPW is supported.");
199 : 0 : sh->dev_cap.mps = MLX5_MPW_ENHANCED;
200 : : } else {
201 : 0 : DRV_LOG(DEBUG, "MPW is supported.");
202 : 0 : sh->dev_cap.mps = MLX5_MPW;
203 : : }
204 : : } else {
205 : 0 : DRV_LOG(DEBUG, "MPW isn't supported.");
206 : 0 : sh->dev_cap.mps = MLX5_MPW_DISABLED;
207 : : }
208 : : #if (RTE_CACHE_LINE_SIZE == 128)
209 : : if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)
210 : : sh->dev_cap.cqe_comp = 1;
211 : : DRV_LOG(DEBUG, "Rx CQE 128B compression is %ssupported.",
212 : : sh->dev_cap.cqe_comp ? "" : "not ");
213 : : #else
214 : 0 : sh->dev_cap.cqe_comp = 1;
215 : : #endif
216 : : #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
217 : 0 : sh->dev_cap.mpls_en =
218 : : ((dv_attr.tunnel_offloads_caps &
219 : 0 : MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
220 : : (dv_attr.tunnel_offloads_caps &
221 : : MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
222 [ # # ]: 0 : DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported.",
223 : : sh->dev_cap.mpls_en ? "" : "not ");
224 : : #else
225 : : DRV_LOG(WARNING,
226 : : "MPLS over GRE/UDP tunnel offloading disabled due to old OFED/rdma-core version or firmware configuration");
227 : : #endif
228 : : #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
229 : : sh->dev_cap.hw_padding = !!attr_ex.rx_pad_end_addr_align;
230 : : #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
231 : 0 : sh->dev_cap.hw_padding = !!(attr_ex.device_cap_flags_ex &
232 : : IBV_DEVICE_PCI_WRITE_END_PADDING);
233 : : #endif
234 : 0 : sh->dev_cap.hw_csum =
235 : 0 : !!(attr_ex.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
236 [ # # ]: 0 : DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
237 : : sh->dev_cap.hw_csum ? "" : "not ");
238 : 0 : sh->dev_cap.hw_vlan_strip = !!(attr_ex.raw_packet_caps &
239 : : IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
240 [ # # ]: 0 : DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
241 : : (sh->dev_cap.hw_vlan_strip ? "" : "not "));
242 : 0 : sh->dev_cap.hw_fcs_strip = !!(attr_ex.raw_packet_caps &
243 : : IBV_RAW_PACKET_CAP_SCATTER_FCS);
244 : : #if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
245 : : !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
246 : : DRV_LOG(DEBUG, "Counters are not supported.");
247 : : #endif
248 : : /*
249 : : * DPDK doesn't support larger/variable indirection tables.
250 : : * Once DPDK supports it, take max size from device attr.
251 : : */
252 : 0 : sh->dev_cap.ind_table_max_size =
253 : 0 : RTE_MIN(attr_ex.rss_caps.max_rwq_indirection_table_size,
254 : : (unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
255 : 0 : DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
256 : : sh->dev_cap.ind_table_max_size);
257 [ # # ]: 0 : sh->dev_cap.tso = (attr_ex.tso_caps.max_tso > 0 &&
258 [ # # ]: 0 : (attr_ex.tso_caps.supported_qpts &
259 : : (1 << IBV_QPT_RAW_PACKET)));
260 [ # # ]: 0 : if (sh->dev_cap.tso)
261 : 0 : sh->dev_cap.tso_max_payload_sz = attr_ex.tso_caps.max_tso;
262 [ # # ]: 0 : strlcpy(sh->dev_cap.fw_ver, attr_ex.orig_attr.fw_ver,
263 : : sizeof(sh->dev_cap.fw_ver));
264 : : #ifdef HAVE_IBV_MLX5_MOD_SWP
265 [ # # ]: 0 : if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
266 : 0 : sh->dev_cap.swp = dv_attr.sw_parsing_caps.sw_parsing_offloads &
267 : : (MLX5_SW_PARSING_CAP |
268 : : MLX5_SW_PARSING_CSUM_CAP |
269 : : MLX5_SW_PARSING_TSO_CAP);
270 : 0 : DRV_LOG(DEBUG, "SWP support: %u", sh->dev_cap.swp);
271 : : #endif
272 : : #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
273 [ # # ]: 0 : if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
274 : : struct mlx5dv_striding_rq_caps *strd_rq_caps =
275 : : &dv_attr.striding_rq_caps;
276 : :
277 : 0 : sh->dev_cap.mprq.enabled = 1;
278 : 0 : sh->dev_cap.mprq.log_min_stride_size =
279 : 0 : strd_rq_caps->min_single_stride_log_num_of_bytes;
280 : 0 : sh->dev_cap.mprq.log_max_stride_size =
281 : 0 : strd_rq_caps->max_single_stride_log_num_of_bytes;
282 : 0 : sh->dev_cap.mprq.log_min_stride_num =
283 : 0 : strd_rq_caps->min_single_wqe_log_num_of_strides;
284 : 0 : sh->dev_cap.mprq.log_max_stride_num =
285 : 0 : strd_rq_caps->max_single_wqe_log_num_of_strides;
286 : 0 : sh->dev_cap.mprq.log_min_stride_wqe_size =
287 : 0 : cdev->config.devx ?
288 [ # # ]: 0 : hca_attr->log_min_stride_wqe_sz :
289 : : MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
290 : 0 : DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %u",
291 : : sh->dev_cap.mprq.log_min_stride_size);
292 : 0 : DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %u",
293 : : sh->dev_cap.mprq.log_max_stride_size);
294 : 0 : DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %u",
295 : : sh->dev_cap.mprq.log_min_stride_num);
296 : 0 : DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %u",
297 : : sh->dev_cap.mprq.log_max_stride_num);
298 : 0 : DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %u",
299 : : sh->dev_cap.mprq.log_min_stride_wqe_size);
300 : 0 : DRV_LOG(DEBUG, "\tsupported_qpts: %d",
301 : : strd_rq_caps->supported_qpts);
302 : 0 : DRV_LOG(DEBUG, "Device supports Multi-Packet RQ.");
303 : : }
304 : : #endif
305 : : #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
306 [ # # ]: 0 : if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
307 : 0 : sh->dev_cap.tunnel_en = dv_attr.tunnel_offloads_caps &
308 : : (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
309 : : MLX5_TUNNELED_OFFLOADS_GRE_CAP |
310 : : MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
311 : : }
312 [ # # ]: 0 : if (sh->dev_cap.tunnel_en) {
313 [ # # # # : 0 : DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
# # ]
314 : : sh->dev_cap.tunnel_en &
315 : : MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
316 : : sh->dev_cap.tunnel_en &
317 : : MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
318 : : sh->dev_cap.tunnel_en &
319 : : MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "");
320 : : } else {
321 : 0 : DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
322 : : }
323 : : #else
324 : : DRV_LOG(WARNING,
325 : : "Tunnel offloading disabled due to old OFED/rdma-core version");
326 : : #endif
327 [ # # ]: 0 : if (!sh->cdev->config.devx)
328 : : return 0;
329 : : /* Check capabilities for Packet Pacing. */
330 : 0 : DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz.",
331 : : hca_attr->dev_freq_khz);
332 [ # # ]: 0 : DRV_LOG(DEBUG, "Packet pacing is %ssupported.",
333 : : hca_attr->qos.packet_pacing ? "" : "not ");
334 [ # # ]: 0 : DRV_LOG(DEBUG, "Cross channel ops are %ssupported.",
335 : : hca_attr->cross_channel ? "" : "not ");
336 [ # # ]: 0 : DRV_LOG(DEBUG, "WQE index ignore is %ssupported.",
337 : : hca_attr->wqe_index_ignore ? "" : "not ");
338 [ # # ]: 0 : DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported.",
339 : : hca_attr->non_wire_sq ? "" : "not ");
340 [ # # ]: 0 : DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
341 : : hca_attr->log_max_static_sq_wq ? "" : "not ",
342 : : hca_attr->log_max_static_sq_wq);
343 [ # # ]: 0 : DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported.",
344 : : hca_attr->qos.wqe_rate_pp ? "" : "not ");
345 : 0 : sh->dev_cap.txpp_en = hca_attr->qos.packet_pacing;
346 [ # # ]: 0 : if (!hca_attr->cross_channel) {
347 : 0 : DRV_LOG(DEBUG,
348 : : "Cross channel operations are required for packet pacing.");
349 : 0 : sh->dev_cap.txpp_en = 0;
350 : : }
351 [ # # ]: 0 : if (!hca_attr->wqe_index_ignore) {
352 : 0 : DRV_LOG(DEBUG,
353 : : "WQE index ignore feature is required for packet pacing.");
354 : 0 : sh->dev_cap.txpp_en = 0;
355 : : }
356 [ # # ]: 0 : if (!hca_attr->non_wire_sq) {
357 : 0 : DRV_LOG(DEBUG,
358 : : "Non-wire SQ feature is required for packet pacing.");
359 : 0 : sh->dev_cap.txpp_en = 0;
360 : : }
361 [ # # ]: 0 : if (!hca_attr->log_max_static_sq_wq) {
362 : 0 : DRV_LOG(DEBUG,
363 : : "Static WQE SQ feature is required for packet pacing.");
364 : 0 : sh->dev_cap.txpp_en = 0;
365 : : }
366 [ # # ]: 0 : if (!hca_attr->qos.wqe_rate_pp) {
367 : 0 : DRV_LOG(DEBUG,
368 : : "WQE rate mode is required for packet pacing.");
369 : 0 : sh->dev_cap.txpp_en = 0;
370 : : }
371 : : #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
372 : : DRV_LOG(DEBUG,
373 : : "DevX does not provide UAR offset, can't create queues for packet pacing.");
374 : : sh->dev_cap.txpp_en = 0;
375 : : #endif
376 : 0 : sh->dev_cap.scatter_fcs_w_decap_disable =
377 : 0 : hca_attr->scatter_fcs_w_decap_disable;
378 : 0 : sh->dev_cap.rq_delay_drop_en = hca_attr->rq_delay_drop;
379 : 0 : mlx5_rt_timestamp_config(sh, hca_attr);
380 : : #ifdef HAVE_IBV_DEVICE_ATTR_ESW_MGR_REG_C0
381 : : if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_REG_C0) {
382 : : sh->dev_cap.esw_info.regc_value = dv_attr.reg_c0.value;
383 : : sh->dev_cap.esw_info.regc_mask = dv_attr.reg_c0.mask;
384 : : }
385 : : #else
386 : 0 : sh->dev_cap.esw_info.regc_value = 0;
387 : 0 : sh->dev_cap.esw_info.regc_mask = 0;
388 : : #endif
389 : 0 : return 0;
390 : : }
391 : :
392 : : /**
393 : : * Detect whether misc5 matching is supported.
394 : : *
395 : : * @param[in] priv
396 : : * Device private data pointer
397 : : */
398 : : #ifdef HAVE_MLX5DV_DR
399 : : static void
400 : 0 : __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
401 : : {
402 : : #ifdef HAVE_IBV_FLOW_DV_SUPPORT
403 : : /* Dummy VxLAN matcher to detect rdma-core misc5 cap
404 : : * Case: IPv4--->UDP--->VxLAN--->vni
405 : : */
406 : : void *tbl;
407 : : struct mlx5_flow_dv_match_params matcher_mask;
408 : : void *match_m;
409 : : void *matcher;
410 : : void *headers_m;
411 : : void *misc5_m;
412 : : uint32_t *tunnel_header_m;
413 : : struct mlx5dv_flow_matcher_attr dv_attr;
414 : :
415 : : memset(&matcher_mask, 0, sizeof(matcher_mask));
416 : 0 : matcher_mask.size = sizeof(matcher_mask.buf);
417 : : match_m = matcher_mask.buf;
418 : : headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
419 : : misc5_m = MLX5_ADDR_OF(fte_match_param,
420 : : match_m, misc_parameters_5);
421 : : tunnel_header_m = (uint32_t *)
422 : : MLX5_ADDR_OF(fte_match_set_misc5,
423 : : misc5_m, tunnel_header_1);
424 : : MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
425 [ # # ]: 0 : MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 4);
426 : 0 : MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
427 : 0 : *tunnel_header_m = 0xffffff;
428 : :
429 : 0 : tbl = mlx5_glue->dr_create_flow_tbl(priv->sh->rx_domain, 1);
430 [ # # ]: 0 : if (!tbl) {
431 : 0 : DRV_LOG(INFO, "No SW steering support");
432 : 0 : return;
433 : : }
434 : 0 : dv_attr.type = IBV_FLOW_ATTR_NORMAL,
435 : 0 : dv_attr.match_mask = (void *)&matcher_mask,
436 : 0 : dv_attr.match_criteria_enable =
437 : : (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
438 : : (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT);
439 : 0 : dv_attr.priority = 3;
440 : : #ifdef HAVE_MLX5DV_DR_ESWITCH
441 : : void *misc2_m;
442 [ # # ]: 0 : if (priv->sh->config.dv_esw_en) {
443 : : /* FDB enabled reg_c_0 */
444 : 0 : dv_attr.match_criteria_enable |=
445 : : (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
446 : : misc2_m = MLX5_ADDR_OF(fte_match_param,
447 : : match_m, misc_parameters_2);
448 [ # # ]: 0 : MLX5_SET(fte_match_set_misc2, misc2_m,
449 : : metadata_reg_c_0, 0xffff);
450 : : }
451 : : #endif
452 : 0 : matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->cdev->ctx,
453 : : &dv_attr, tbl);
454 [ # # ]: 0 : if (matcher) {
455 : 0 : priv->sh->misc5_cap = 1;
456 : 0 : mlx5_glue->dv_destroy_flow_matcher(matcher);
457 : : }
458 : 0 : mlx5_glue->dr_destroy_flow_tbl(tbl);
459 : : #else
460 : : RTE_SET_USED(priv);
461 : : #endif
462 : : }
463 : : #endif
464 : :
465 : : /**
466 : : * Initialize DR-related data within the private structure.
467 : : * The routine checks the reference counter and performs the actual
468 : : * resource creation/initialization only for the first reference.
469 : : *
470 : : * @param[in] eth_dev
471 : : * Pointer to the device.
472 : : *
473 : : * @return
474 : : * Zero on success, positive error code otherwise.
475 : : */
476 : : static int
477 : 0 : mlx5_alloc_shared_dr(struct rte_eth_dev *eth_dev)
478 : : {
479 : 0 : struct mlx5_priv *priv = eth_dev->data->dev_private;
480 : 0 : struct mlx5_dev_ctx_shared *sh = priv->sh;
481 : : char s[MLX5_NAME_SIZE] __rte_unused;
482 : : int err;
483 : :
484 : : MLX5_ASSERT(sh && sh->refcnt);
485 [ # # ]: 0 : if (sh->refcnt > 1)
486 : : return 0;
487 : 0 : err = mlx5_alloc_table_hash_list(priv);
488 [ # # ]: 0 : if (err)
489 : 0 : goto error;
490 : 0 : sh->default_miss_action =
491 : 0 : mlx5_glue->dr_create_flow_action_default_miss();
492 [ # # ]: 0 : if (!sh->default_miss_action)
493 : 0 : DRV_LOG(WARNING, "Default miss action is not supported.");
494 : : /* The resources below are only valid with DV support. */
495 : : #ifdef HAVE_IBV_FLOW_DV_SUPPORT
496 : : /* Init shared flex parsers list, lcore_share is not needed. */
497 : 0 : snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
498 : 0 : sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
499 : : mlx5_flex_parser_create_cb,
500 : : mlx5_flex_parser_match_cb,
501 : : mlx5_flex_parser_remove_cb,
502 : : mlx5_flex_parser_clone_cb,
503 : : mlx5_flex_parser_clone_free_cb);
504 [ # # ]: 0 : if (!sh->flex_parsers_dv)
505 : 0 : goto error;
506 [ # # ]: 0 : if (priv->sh->config.dv_flow_en == 2) {
507 [ # # ]: 0 : if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
508 [ # # ]: 0 : sh->dv_regc0_mask) {
509 : : /* Reuse DV callback functions. */
510 : 0 : sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
511 : : MLX5_FLOW_MREG_HTABLE_SZ,
512 : : false, true, eth_dev,
513 : : flow_nta_mreg_create_cb,
514 : : flow_dv_mreg_match_cb,
515 : : flow_nta_mreg_remove_cb,
516 : : flow_dv_mreg_clone_cb,
517 : : flow_dv_mreg_clone_free_cb);
518 [ # # ]: 0 : if (!sh->mreg_cp_tbl) {
519 : : err = ENOMEM;
520 : 0 : goto error;
521 : : }
522 : : }
523 : 0 : return 0;
524 : : }
525 : : /* Init port id action list. */
526 : : snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
527 : 0 : sh->port_id_action_list = mlx5_list_create(s, sh, true,
528 : : flow_dv_port_id_create_cb,
529 : : flow_dv_port_id_match_cb,
530 : : flow_dv_port_id_remove_cb,
531 : : flow_dv_port_id_clone_cb,
532 : : flow_dv_port_id_clone_free_cb);
533 [ # # ]: 0 : if (!sh->port_id_action_list)
534 : 0 : goto error;
535 : : /* Init push vlan action list. */
536 : : snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
537 : 0 : sh->push_vlan_action_list = mlx5_list_create(s, sh, true,
538 : : flow_dv_push_vlan_create_cb,
539 : : flow_dv_push_vlan_match_cb,
540 : : flow_dv_push_vlan_remove_cb,
541 : : flow_dv_push_vlan_clone_cb,
542 : : flow_dv_push_vlan_clone_free_cb);
543 [ # # ]: 0 : if (!sh->push_vlan_action_list)
544 : 0 : goto error;
545 : : /* Init sample action list. */
546 : : snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
547 : 0 : sh->sample_action_list = mlx5_list_create(s, sh, true,
548 : : flow_dv_sample_create_cb,
549 : : flow_dv_sample_match_cb,
550 : : flow_dv_sample_remove_cb,
551 : : flow_dv_sample_clone_cb,
552 : : flow_dv_sample_clone_free_cb);
553 [ # # ]: 0 : if (!sh->sample_action_list)
554 : 0 : goto error;
555 : : /* Init dest array action list. */
556 : : snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
557 : 0 : sh->dest_array_list = mlx5_list_create(s, sh, true,
558 : : flow_dv_dest_array_create_cb,
559 : : flow_dv_dest_array_match_cb,
560 : : flow_dv_dest_array_remove_cb,
561 : : flow_dv_dest_array_clone_cb,
562 : : flow_dv_dest_array_clone_free_cb);
563 [ # # ]: 0 : if (!sh->dest_array_list)
564 : 0 : goto error;
565 : : #else
566 : : if (priv->sh->config.dv_flow_en == 2)
567 : : return 0;
568 : : #endif
569 : : #ifdef HAVE_MLX5DV_DR
570 : : void *domain;
571 : :
572 : : /* This is the first reference, we should initialize the structures. */
573 : 0 : domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
574 : : MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
575 [ # # ]: 0 : if (!domain) {
576 : 0 : DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
577 : 0 : err = errno;
578 : 0 : goto error;
579 : : }
580 : 0 : sh->rx_domain = domain;
581 : 0 : domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
582 : : MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
583 [ # # ]: 0 : if (!domain) {
584 : 0 : DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
585 : 0 : err = errno;
586 : 0 : goto error;
587 : : }
588 : 0 : sh->tx_domain = domain;
589 : : #ifdef HAVE_MLX5DV_DR_ESWITCH
590 [ # # ]: 0 : if (sh->config.dv_esw_en) {
591 : 0 : domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
592 : : MLX5DV_DR_DOMAIN_TYPE_FDB);
593 [ # # ]: 0 : if (!domain) {
594 : 0 : DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
595 : 0 : err = errno;
596 : 0 : goto error;
597 : : }
598 : 0 : sh->fdb_domain = domain;
599 : : }
600 : : /*
601 : : * The drop action is just a dummy placeholder in rdma-core. It
602 : : * does not belong to any domain, has no attributes, and can be
603 : : * shared by the entire device.
604 : : */
605 : 0 : sh->dr_drop_action = mlx5_glue->dr_create_flow_action_drop();
606 [ # # ]: 0 : if (!sh->dr_drop_action) {
607 : 0 : DRV_LOG(ERR, "FDB mlx5dv_dr_create_flow_action_drop");
608 : 0 : err = errno;
609 : 0 : goto error;
610 : : }
611 : :
612 [ # # ]: 0 : if (sh->config.dv_flow_en == 1) {
613 : : /* Query availability of metadata reg_c's. */
614 [ # # ]: 0 : if (!priv->sh->metadata_regc_check_flag) {
615 : 0 : err = mlx5_flow_discover_mreg_c(eth_dev);
616 [ # # ]: 0 : if (err < 0) {
617 : 0 : err = -err;
618 : 0 : goto error;
619 : : }
620 : : }
621 [ # # ]: 0 : if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
622 : 0 : DRV_LOG(DEBUG,
623 : : "port %u extensive metadata register is not supported",
624 : : eth_dev->data->port_id);
625 [ # # ]: 0 : if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
626 : 0 : DRV_LOG(ERR, "metadata mode %u is not supported "
627 : : "(no metadata registers available)",
628 : : sh->config.dv_xmeta_en);
629 : : err = ENOTSUP;
630 : 0 : goto error;
631 : : }
632 : : }
633 [ # # # # ]: 0 : if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
634 [ # # ]: 0 : mlx5_flow_ext_mreg_supported(eth_dev) && sh->dv_regc0_mask) {
635 : 0 : sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
636 : : MLX5_FLOW_MREG_HTABLE_SZ,
637 : : false, true, eth_dev,
638 : : flow_dv_mreg_create_cb,
639 : : flow_dv_mreg_match_cb,
640 : : flow_dv_mreg_remove_cb,
641 : : flow_dv_mreg_clone_cb,
642 : : flow_dv_mreg_clone_free_cb);
643 [ # # ]: 0 : if (!sh->mreg_cp_tbl) {
644 : : err = ENOMEM;
645 : 0 : goto error;
646 : : }
647 : : }
648 : : }
649 : : #endif
650 [ # # # # ]: 0 : if (!sh->tunnel_hub && sh->config.dv_miss_info)
651 : 0 : err = mlx5_alloc_tunnel_hub(sh);
652 [ # # ]: 0 : if (err) {
653 : 0 : DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
654 : 0 : goto error;
655 : : }
656 [ # # ]: 0 : if (sh->config.reclaim_mode == MLX5_RCM_AGGR) {
657 : 0 : mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
658 : 0 : mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
659 [ # # ]: 0 : if (sh->fdb_domain)
660 : 0 : mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
661 : : }
662 : 0 : sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
663 [ # # ]: 0 : if (!sh->config.allow_duplicate_pattern) {
664 : : #ifndef HAVE_MLX5_DR_ALLOW_DUPLICATE
665 : : DRV_LOG(WARNING, "Disallow duplicate pattern is not supported - maybe old rdma-core version?");
666 : : #endif
667 : 0 : mlx5_glue->dr_allow_duplicate_rules(sh->rx_domain, 0);
668 : 0 : mlx5_glue->dr_allow_duplicate_rules(sh->tx_domain, 0);
669 [ # # ]: 0 : if (sh->fdb_domain)
670 : 0 : mlx5_glue->dr_allow_duplicate_rules(sh->fdb_domain, 0);
671 : : }
672 : :
673 : 0 : __mlx5_discovery_misc5_cap(priv);
674 : : #endif /* HAVE_MLX5DV_DR */
675 : 0 : LIST_INIT(&sh->shared_rxqs);
676 : 0 : return 0;
677 : 0 : error:
678 : : /* Rollback the created objects. */
679 [ # # ]: 0 : if (sh->rx_domain) {
680 : 0 : mlx5_glue->dr_destroy_domain(sh->rx_domain);
681 : 0 : sh->rx_domain = NULL;
682 : : }
683 [ # # ]: 0 : if (sh->tx_domain) {
684 : 0 : mlx5_glue->dr_destroy_domain(sh->tx_domain);
685 : 0 : sh->tx_domain = NULL;
686 : : }
687 [ # # ]: 0 : if (sh->fdb_domain) {
688 : 0 : mlx5_glue->dr_destroy_domain(sh->fdb_domain);
689 : 0 : sh->fdb_domain = NULL;
690 : : }
691 [ # # ]: 0 : if (sh->dr_drop_action) {
692 : 0 : mlx5_glue->destroy_flow_action(sh->dr_drop_action);
693 : 0 : sh->dr_drop_action = NULL;
694 : : }
695 [ # # ]: 0 : if (sh->pop_vlan_action) {
696 : 0 : mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
697 : 0 : sh->pop_vlan_action = NULL;
698 : : }
699 [ # # ]: 0 : if (sh->encaps_decaps) {
700 : 0 : mlx5_hlist_destroy(sh->encaps_decaps);
701 : 0 : sh->encaps_decaps = NULL;
702 : : }
703 [ # # ]: 0 : if (sh->modify_cmds) {
704 : 0 : mlx5_hlist_destroy(sh->modify_cmds);
705 : 0 : sh->modify_cmds = NULL;
706 : : }
707 [ # # ]: 0 : if (sh->tag_table) {
708 : : /* Tags should have been destroyed together with their flows before. */
709 : 0 : mlx5_hlist_destroy(sh->tag_table);
710 : 0 : sh->tag_table = NULL;
711 : : }
712 [ # # ]: 0 : if (sh->tunnel_hub) {
713 : 0 : mlx5_release_tunnel_hub(sh, priv->dev_port);
714 : 0 : sh->tunnel_hub = NULL;
715 : : }
716 : 0 : mlx5_free_table_hash_list(priv);
717 [ # # ]: 0 : if (sh->port_id_action_list) {
718 : 0 : mlx5_list_destroy(sh->port_id_action_list);
719 : 0 : sh->port_id_action_list = NULL;
720 : : }
721 [ # # ]: 0 : if (sh->push_vlan_action_list) {
722 : 0 : mlx5_list_destroy(sh->push_vlan_action_list);
723 : 0 : sh->push_vlan_action_list = NULL;
724 : : }
725 [ # # ]: 0 : if (sh->sample_action_list) {
726 : 0 : mlx5_list_destroy(sh->sample_action_list);
727 : 0 : sh->sample_action_list = NULL;
728 : : }
729 [ # # ]: 0 : if (sh->dest_array_list) {
730 : 0 : mlx5_list_destroy(sh->dest_array_list);
731 : 0 : sh->dest_array_list = NULL;
732 : : }
733 [ # # ]: 0 : if (sh->mreg_cp_tbl) {
734 : 0 : mlx5_hlist_destroy(sh->mreg_cp_tbl);
735 : 0 : sh->mreg_cp_tbl = NULL;
736 : : }
737 : : return err;
738 : : }
739 : :
740 : : /**
741 : : * Destroy DR-related data within the private structure.
742 : : *
743 : : * @param[in] priv
744 : : * Pointer to the private device data structure.
745 : : */
746 : : void
747 : 0 : mlx5_os_free_shared_dr(struct mlx5_priv *priv)
748 : : {
749 : 0 : struct mlx5_dev_ctx_shared *sh = priv->sh;
750 : : #ifdef HAVE_MLX5DV_DR
751 : : int i;
752 : : #endif
753 : :
754 : : MLX5_ASSERT(sh && sh->refcnt);
755 [ # # ]: 0 : if (sh->refcnt > 1)
756 : : return;
757 : : MLX5_ASSERT(LIST_EMPTY(&sh->shared_rxqs));
758 : : #ifdef HAVE_MLX5DV_DR
759 [ # # ]: 0 : if (sh->rx_domain) {
760 : 0 : mlx5_glue->dr_destroy_domain(sh->rx_domain);
761 : 0 : sh->rx_domain = NULL;
762 : : }
763 [ # # ]: 0 : if (sh->tx_domain) {
764 : 0 : mlx5_glue->dr_destroy_domain(sh->tx_domain);
765 : 0 : sh->tx_domain = NULL;
766 : : }
767 : : #ifdef HAVE_MLX5DV_DR_ESWITCH
768 [ # # ]: 0 : if (sh->fdb_domain) {
769 : 0 : mlx5_glue->dr_destroy_domain(sh->fdb_domain);
770 : 0 : sh->fdb_domain = NULL;
771 : : }
772 [ # # ]: 0 : if (sh->dr_drop_action) {
773 : 0 : mlx5_glue->destroy_flow_action(sh->dr_drop_action);
774 : 0 : sh->dr_drop_action = NULL;
775 : : }
776 : : #endif
777 [ # # ]: 0 : if (sh->pop_vlan_action) {
778 : 0 : mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
779 : 0 : sh->pop_vlan_action = NULL;
780 : : }
781 [ # # ]: 0 : for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
782 [ # # ]: 0 : if (sh->send_to_kernel_action[i].action) {
783 : : void *action = sh->send_to_kernel_action[i].action;
784 : :
785 : 0 : mlx5_glue->destroy_flow_action(action);
786 : 0 : sh->send_to_kernel_action[i].action = NULL;
787 : : }
788 [ # # ]: 0 : if (sh->send_to_kernel_action[i].tbl) {
789 : : struct mlx5_flow_tbl_resource *tbl =
790 : : sh->send_to_kernel_action[i].tbl;
791 : :
792 : 0 : flow_dv_tbl_resource_release(sh, tbl);
793 : 0 : sh->send_to_kernel_action[i].tbl = NULL;
794 : : }
795 : : }
796 : : #endif /* HAVE_MLX5DV_DR */
797 [ # # ]: 0 : if (sh->default_miss_action)
798 : 0 : mlx5_glue->destroy_flow_action
799 : : (sh->default_miss_action);
800 [ # # ]: 0 : if (sh->encaps_decaps) {
801 : 0 : mlx5_hlist_destroy(sh->encaps_decaps);
802 : 0 : sh->encaps_decaps = NULL;
803 : : }
804 [ # # ]: 0 : if (sh->modify_cmds) {
805 : 0 : mlx5_hlist_destroy(sh->modify_cmds);
806 : 0 : sh->modify_cmds = NULL;
807 : : }
808 [ # # ]: 0 : if (sh->tag_table) {
809 : : /* Tags should have been destroyed together with their flows before. */
810 : 0 : mlx5_hlist_destroy(sh->tag_table);
811 : 0 : sh->tag_table = NULL;
812 : : }
813 [ # # ]: 0 : if (sh->tunnel_hub) {
814 : 0 : mlx5_release_tunnel_hub(sh, priv->dev_port);
815 : 0 : sh->tunnel_hub = NULL;
816 : : }
817 : 0 : mlx5_free_table_hash_list(priv);
818 [ # # ]: 0 : if (sh->port_id_action_list) {
819 : 0 : mlx5_list_destroy(sh->port_id_action_list);
820 : 0 : sh->port_id_action_list = NULL;
821 : : }
822 [ # # ]: 0 : if (sh->push_vlan_action_list) {
823 : 0 : mlx5_list_destroy(sh->push_vlan_action_list);
824 : 0 : sh->push_vlan_action_list = NULL;
825 : : }
826 [ # # ]: 0 : if (sh->sample_action_list) {
827 : 0 : mlx5_list_destroy(sh->sample_action_list);
828 : 0 : sh->sample_action_list = NULL;
829 : : }
830 [ # # ]: 0 : if (sh->dest_array_list) {
831 : 0 : mlx5_list_destroy(sh->dest_array_list);
832 : 0 : sh->dest_array_list = NULL;
833 : : }
834 [ # # ]: 0 : if (sh->mreg_cp_tbl) {
835 : 0 : mlx5_hlist_destroy(sh->mreg_cp_tbl);
836 : 0 : sh->mreg_cp_tbl = NULL;
837 : : }
838 : : }
839 : :
840 : : /**
841 : : * Initialize shared data between primary and secondary process.
842 : : *
843 : : * A memzone is reserved by primary process and secondary processes attach to
844 : : * the memzone.
845 : : *
846 : : * @return
847 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
848 : : */
849 : : static int
850 : 0 : mlx5_init_shared_data(void)
851 : : {
852 : : const struct rte_memzone *mz;
853 : : int ret = 0;
854 : :
855 : : rte_spinlock_lock(&mlx5_shared_data_lock);
856 [ # # ]: 0 : if (mlx5_shared_data == NULL) {
857 [ # # ]: 0 : if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
858 : : /* Allocate shared memory. */
859 : 0 : mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
860 : : sizeof(*mlx5_shared_data),
861 : : SOCKET_ID_ANY, 0);
862 [ # # ]: 0 : if (mz == NULL) {
863 : 0 : DRV_LOG(ERR,
864 : : "Cannot allocate mlx5 shared data");
865 : 0 : ret = -rte_errno;
866 : 0 : goto error;
867 : : }
868 : 0 : mlx5_shared_data = mz->addr;
869 : : memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
870 : 0 : rte_spinlock_init(&mlx5_shared_data->lock);
871 : : } else {
872 : : /* Lookup allocated shared memory. */
873 : 0 : mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
874 [ # # ]: 0 : if (mz == NULL) {
875 : 0 : DRV_LOG(ERR,
876 : : "Cannot attach mlx5 shared data");
877 : 0 : ret = -rte_errno;
878 : 0 : goto error;
879 : : }
880 : 0 : mlx5_shared_data = mz->addr;
881 : : memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
882 : : }
883 : : }
884 : 0 : error:
885 : : rte_spinlock_unlock(&mlx5_shared_data_lock);
886 : 0 : return ret;
887 : : }
888 : :
889 : : /**
890 : : * PMD global initialization.
891 : : *
892 : : * Independently of any individual device, this function initializes global
893 : : * per-PMD data structures, distinguishing primary and secondary processes.
894 : : * Hence, each initialization is performed once per process.
895 : : *
896 : : * @return
897 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
898 : : */
899 : : static int
900 : 0 : mlx5_init_once(void)
901 : : {
902 : : struct mlx5_shared_data *sd;
903 : : struct mlx5_local_data *ld = &mlx5_local_data;
904 : : int ret = 0;
905 : :
906 [ # # ]: 0 : if (mlx5_init_shared_data())
907 : 0 : return -rte_errno;
908 : 0 : sd = mlx5_shared_data;
909 : : MLX5_ASSERT(sd);
910 : 0 : rte_spinlock_lock(&sd->lock);
911 [ # # # ]: 0 : switch (rte_eal_process_type()) {
912 : 0 : case RTE_PROC_PRIMARY:
913 [ # # ]: 0 : if (sd->init_done)
914 : : break;
915 : 0 : ret = mlx5_mp_init_primary(MLX5_MP_NAME,
916 : : mlx5_mp_os_primary_handle);
917 [ # # ]: 0 : if (ret)
918 : 0 : goto out;
919 : 0 : sd->init_done = true;
920 : 0 : break;
921 : 0 : case RTE_PROC_SECONDARY:
922 [ # # ]: 0 : if (ld->init_done)
923 : : break;
924 : 0 : ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
925 : : mlx5_mp_os_secondary_handle);
926 [ # # ]: 0 : if (ret)
927 : 0 : goto out;
928 : 0 : ++sd->secondary_cnt;
929 : 0 : ld->init_done = true;
930 : 0 : break;
931 : : default:
932 : : break;
933 : : }
934 : 0 : out:
935 : : rte_spinlock_unlock(&sd->lock);
936 : 0 : return ret;
937 : : }
938 : :
939 : : /**
940 : : * Detect DR flow drop action support.
941 : : *
942 : : * @param dev
943 : : * Pointer to rte_eth_dev structure.
944 : : *
945 : : */
946 : : static void
947 : 0 : mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused)
948 : : {
949 : : #ifdef HAVE_MLX5DV_DR
950 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
951 : :
952 [ # # # # ]: 0 : if (!priv->sh->config.dv_flow_en || !priv->sh->dr_drop_action)
953 : : return;
954 : : /**
955 : : * Use the DR drop action placeholder when it is supported;
956 : : * otherwise, use the queue drop action.
957 : : */
958 [ # # ]: 0 : if (!priv->sh->drop_action_check_flag) {
959 [ # # ]: 0 : if (!mlx5_flow_discover_dr_action_support(dev))
960 : 0 : priv->sh->dr_root_drop_action_en = 1;
961 : 0 : priv->sh->drop_action_check_flag = 1;
962 : : }
963 [ # # ]: 0 : if (priv->sh->dr_root_drop_action_en)
964 : 0 : priv->root_drop_action = priv->sh->dr_drop_action;
965 : : else
966 : 0 : priv->root_drop_action = priv->drop_queue.hrxq->action;
967 : : #endif
968 : : }
969 : :
970 : : static void
971 : 0 : mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
972 : : {
973 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
974 : 0 : void *ctx = priv->sh->cdev->ctx;
975 : :
976 : 0 : priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx, NULL);
977 [ # # ]: 0 : if (!priv->q_counters) {
978 : 0 : struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
979 : : struct ibv_wq *wq;
980 : :
981 : 0 : DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
982 : : "by DevX - fall-back to use the kernel driver global "
983 : : "queue counter.", dev->data->port_id);
984 : :
985 : : /* Create a WQ via the kernel driver and query its queue counter ID. */
986 [ # # ]: 0 : if (cq) {
987 : 0 : wq = mlx5_glue->create_wq(ctx,
988 : 0 : &(struct ibv_wq_init_attr){
989 : : .wq_type = IBV_WQT_RQ,
990 : : .max_wr = 1,
991 : : .max_sge = 1,
992 : 0 : .pd = priv->sh->cdev->pd,
993 : : .cq = cq,
994 : : });
995 [ # # ]: 0 : if (wq) {
996 : : /* The counter is assigned only in the RDY state. */
997 : 0 : int ret = mlx5_glue->modify_wq(wq,
998 : 0 : &(struct ibv_wq_attr){
999 : : .attr_mask = IBV_WQ_ATTR_STATE,
1000 : : .wq_state = IBV_WQS_RDY,
1001 : : });
1002 : :
1003 [ # # ]: 0 : if (ret == 0)
1004 : 0 : mlx5_devx_cmd_wq_query(wq,
1005 : : &priv->counter_set_id);
1006 : 0 : claim_zero(mlx5_glue->destroy_wq(wq));
1007 : : }
1008 : 0 : claim_zero(mlx5_glue->destroy_cq(cq));
1009 : : }
1010 : : } else {
1011 : 0 : priv->counter_set_id = priv->q_counters->id;
1012 : : }
1013 [ # # ]: 0 : if (priv->counter_set_id == 0)
1014 : 0 : DRV_LOG(INFO, "Part of the port %d statistics will not be "
1015 : : "available.", dev->data->port_id);
1016 : 0 : }
1017 : :
1018 : : /**
1019 : : * Check if the representor spawn info matches the devargs.
1020 : : *
1021 : : * @param spawn
1022 : : * Verbs device parameters (name, port, switch_info) to spawn.
1023 : : * @param eth_da
1024 : : * Device devargs to probe.
1025 : : *
1026 : : * @return
1027 : : * Match result.
1028 : : */
1029 : : static bool
1030 : 0 : mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
1031 : : struct rte_eth_devargs *eth_da)
1032 : : {
1033 : 0 : struct mlx5_switch_info *switch_info = &spawn->info;
1034 : : unsigned int p, f;
1035 : : uint16_t id;
1036 : 0 : uint16_t repr_id = mlx5_representor_id_encode(switch_info,
1037 : : eth_da->type);
1038 : :
1039 : : /*
1040 : : * Assuming a Multiport E-Switch device was detected,
1041 : : * if the spawned port is an uplink, check whether the port
1042 : : * was requested through the representor devarg.
1043 : : */
1044 [ # # ]: 0 : if (mlx5_is_probed_port_on_mpesw_device(spawn) &&
1045 [ # # ]: 0 : switch_info->name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
1046 [ # # ]: 0 : for (p = 0; p < eth_da->nb_ports; ++p)
1047 [ # # ]: 0 : if (switch_info->port_name == eth_da->ports[p])
1048 : : return true;
1049 : 0 : rte_errno = EBUSY;
1050 : 0 : return false;
1051 : : }
1052 [ # # # # : 0 : switch (eth_da->type) {
# ]
1053 : : case RTE_ETH_REPRESENTOR_PF:
1054 : : /*
1055 : : * PF representors provided in devargs translate to uplink ports,
1056 : : * but only if the device is part of an MPESW device.
1057 : : */
1058 [ # # ]: 0 : if (!mlx5_is_probed_port_on_mpesw_device(spawn)) {
1059 : 0 : rte_errno = EBUSY;
1060 : 0 : return false;
1061 : : }
1062 : : break;
1063 : 0 : case RTE_ETH_REPRESENTOR_SF:
1064 [ # # ]: 0 : if (!(spawn->info.port_name == -1 &&
1065 [ # # ]: 0 : switch_info->name_type ==
1066 : 0 : MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
1067 [ # # ]: 0 : switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFSF) {
1068 : 0 : rte_errno = EBUSY;
1069 : 0 : return false;
1070 : : }
1071 : : break;
1072 : 0 : case RTE_ETH_REPRESENTOR_VF:
1073 : : /* Allow HPF representor index -1 as an exception. */
1074 [ # # ]: 0 : if (!(spawn->info.port_name == -1 &&
1075 [ # # ]: 0 : switch_info->name_type ==
1076 : 0 : MLX5_PHYS_PORT_NAME_TYPE_PFHPF) &&
1077 [ # # ]: 0 : switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFVF) {
1078 : 0 : rte_errno = EBUSY;
1079 : 0 : return false;
1080 : : }
1081 : : break;
1082 : 0 : case RTE_ETH_REPRESENTOR_NONE:
1083 : 0 : rte_errno = EBUSY;
1084 : 0 : return false;
1085 : 0 : default:
1086 : 0 : rte_errno = ENOTSUP;
1087 : 0 : DRV_LOG(ERR, "unsupported representor type");
1088 : 0 : return false;
1089 : : }
1090 : : /* Check representor ID: */
1091 [ # # ]: 0 : for (p = 0; p < eth_da->nb_ports; ++p) {
1092 [ # # # # ]: 0 : if (!mlx5_is_probed_port_on_mpesw_device(spawn) && spawn->pf_bond < 0) {
1093 : : /* For non-LAG mode, allow and ignore pf. */
1094 : 0 : switch_info->pf_num = eth_da->ports[p];
1095 : 0 : repr_id = mlx5_representor_id_encode(switch_info,
1096 : : eth_da->type);
1097 : : }
1098 [ # # ]: 0 : for (f = 0; f < eth_da->nb_representor_ports; ++f) {
1099 : 0 : id = MLX5_REPRESENTOR_ID
1100 : : (eth_da->ports[p], eth_da->type,
1101 : : eth_da->representor_ports[f]);
1102 [ # # ]: 0 : if (repr_id == id)
1103 : : return true;
1104 : : }
1105 : : }
1106 : 0 : rte_errno = EBUSY;
1107 : 0 : return false;
1108 : : }
1109 : :
1110 : : /**
1111 : : * Spawn an Ethernet device from Verbs information.
1112 : : *
1113 : : * @param dpdk_dev
1114 : : * Backing DPDK device.
1115 : : * @param spawn
1116 : : * Verbs device parameters (name, port, switch_info) to spawn.
1117 : : * @param eth_da
1118 : : * Device arguments.
1119 : : * @param mkvlist
1120 : : * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
1121 : : *
1122 : : * @return
1123 : : * A valid Ethernet device object on success, NULL otherwise and rte_errno
1124 : : * is set. The following errors are defined:
1125 : : *
1126 : : * EBUSY: device is not supposed to be spawned.
1127 : : * EEXIST: device is already spawned
1128 : : */
1129 : : static struct rte_eth_dev *
1130 : 0 : mlx5_dev_spawn(struct rte_device *dpdk_dev,
1131 : : struct mlx5_dev_spawn_data *spawn,
1132 : : struct rte_eth_devargs *eth_da,
1133 : : struct mlx5_kvargs_ctrl *mkvlist)
1134 : : {
1135 : 0 : const struct mlx5_switch_info *switch_info = &spawn->info;
1136 : : struct mlx5_dev_ctx_shared *sh = NULL;
1137 : 0 : struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
1138 : : struct rte_eth_dev *eth_dev = NULL;
1139 : : struct mlx5_priv *priv = NULL;
1140 : : int err = 0;
1141 : : struct rte_ether_addr mac;
1142 : : char name[RTE_ETH_NAME_MAX_LEN];
1143 : : int own_domain_id = 0;
1144 : : uint16_t port_id;
1145 : 0 : struct mlx5_port_info vport_info = { .query_flags = 0 };
1146 : : int nl_rdma;
1147 : : int i;
1148 : : struct mlx5_indexed_pool_config icfg[RTE_DIM(default_icfg)];
1149 : :
1150 : : memcpy(icfg, default_icfg, sizeof(icfg));
1151 : : /* Determine if this port representor is supposed to be spawned. */
1152 [ # # # # : 0 : if (switch_info->representor && dpdk_dev->devargs &&
# # ]
1153 : 0 : !mlx5_representor_match(spawn, eth_da))
1154 : : return NULL;
1155 : : /* Build device name. */
1156 [ # # ]: 0 : if (spawn->pf_bond >= 0) {
1157 : : /* Bonding device. */
1158 [ # # ]: 0 : if (!switch_info->representor) {
1159 : 0 : err = snprintf(name, sizeof(name), "%s_%s",
1160 : : dpdk_dev->name, spawn->phys_dev_name);
1161 : : } else {
1162 : 0 : err = snprintf(name, sizeof(name), "%s_%s_representor_c%dpf%d%s%u",
1163 : : dpdk_dev->name, spawn->phys_dev_name,
1164 : 0 : switch_info->ctrl_num,
1165 : 0 : switch_info->pf_num,
1166 : 0 : switch_info->name_type ==
1167 : : MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
1168 [ # # ]: 0 : switch_info->port_name);
1169 : : }
1170 [ # # ]: 0 : } else if (mlx5_is_probed_port_on_mpesw_device(spawn)) {
1171 : : /* MPESW device. */
1172 [ # # ]: 0 : if (switch_info->name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
1173 : 0 : err = snprintf(name, sizeof(name), "%s_p%d",
1174 : : dpdk_dev->name, spawn->mpesw_port);
1175 : : } else {
1176 : 0 : err = snprintf(name, sizeof(name), "%s_representor_c%dpf%d%s%u",
1177 : : dpdk_dev->name,
1178 : 0 : switch_info->ctrl_num,
1179 : 0 : switch_info->pf_num,
1180 : : switch_info->name_type ==
1181 : : MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
1182 [ # # ]: 0 : switch_info->port_name);
1183 : : }
1184 : : } else {
1185 : : /* Single device. */
1186 [ # # ]: 0 : if (!switch_info->representor)
1187 : 0 : strlcpy(name, dpdk_dev->name, sizeof(name));
1188 : : else
1189 : 0 : err = snprintf(name, sizeof(name), "%s_representor_%s%u",
1190 : : dpdk_dev->name,
1191 : 0 : switch_info->name_type ==
1192 : : MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
1193 [ # # ]: 0 : switch_info->port_name);
1194 : : }
1195 [ # # ]: 0 : if (err >= (int)sizeof(name))
1196 : 0 : DRV_LOG(WARNING, "device name overflow %s", name);
1197 : : /* check if the device is already spawned */
1198 [ # # ]: 0 : if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
1199 : : /*
1200 : : * When the device is already spawned, its devargs should be set
1201 : : * as used; otherwise, mlx5_kvargs_validate() will fail.
1202 : : */
1203 [ # # ]: 0 : if (mkvlist)
1204 : 0 : mlx5_port_args_set_used(name, port_id, mkvlist);
1205 : 0 : rte_errno = EEXIST;
1206 : 0 : return NULL;
1207 : : }
1208 : 0 : DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
1209 [ # # ]: 0 : if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1210 : : struct mlx5_mp_id mp_id;
1211 : : int fd;
1212 : :
1213 : 0 : eth_dev = rte_eth_dev_attach_secondary(name);
1214 [ # # ]: 0 : if (eth_dev == NULL) {
1215 : 0 : DRV_LOG(ERR, "can not attach rte ethdev");
1216 : 0 : rte_errno = ENOMEM;
1217 : 0 : return NULL;
1218 : : }
1219 : 0 : eth_dev->device = dpdk_dev;
1220 : 0 : eth_dev->dev_ops = &mlx5_dev_sec_ops;
1221 : 0 : eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1222 : 0 : eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1223 : 0 : err = mlx5_proc_priv_init(eth_dev);
1224 [ # # ]: 0 : if (err)
1225 : : return NULL;
1226 : 0 : mlx5_mp_id_init(&mp_id, eth_dev->data->port_id);
1227 : : /* Receive command fd from primary process */
1228 : 0 : fd = mlx5_mp_req_verbs_cmd_fd(&mp_id);
1229 [ # # ]: 0 : if (fd < 0)
1230 : 0 : goto err_secondary;
1231 : : /* Remap UAR for Tx queues. */
1232 : 0 : err = mlx5_tx_uar_init_secondary(eth_dev, fd);
1233 : 0 : close(fd);
1234 [ # # ]: 0 : if (err)
1235 : 0 : goto err_secondary;
1236 : : /*
1237 : : * Ethdev pointer is still required as input since
1238 : : * the primary device is not accessible from the
1239 : : * secondary process.
1240 : : */
1241 : 0 : eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
1242 : 0 : eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
1243 : 0 : return eth_dev;
1244 : 0 : err_secondary:
1245 : 0 : mlx5_dev_close(eth_dev);
1246 : 0 : return NULL;
1247 : : }
1248 : 0 : sh = mlx5_alloc_shared_dev_ctx(spawn, mkvlist);
1249 [ # # ]: 0 : if (!sh)
1250 : : return NULL;
1251 : 0 : nl_rdma = mlx5_nl_init(NETLINK_RDMA, 0);
1252 : : /* Check port status. */
1253 [ # # ]: 0 : if (spawn->phys_port <= UINT8_MAX) {
1254 : : /* The legacy Verbs API supports only u8 port numbers. */
1255 : 0 : err = mlx5_glue->query_port(sh->cdev->ctx, spawn->phys_port,
1256 : : &port_attr);
1257 [ # # ]: 0 : if (err) {
1258 : 0 : DRV_LOG(ERR, "port query failed: %s", strerror(err));
1259 : 0 : goto error;
1260 : : }
1261 [ # # ]: 0 : if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
1262 : 0 : DRV_LOG(ERR, "port is not configured in Ethernet mode");
1263 : : err = EINVAL;
1264 : 0 : goto error;
1265 : : }
1266 [ # # ]: 0 : } else if (nl_rdma >= 0) {
1267 : : /* IB doesn't allow more than 255 ports, must be Ethernet. */
1268 : 0 : err = mlx5_nl_port_state(nl_rdma,
1269 : : spawn->phys_dev_name,
1270 : 0 : spawn->phys_port, &spawn->cdev->dev_info);
1271 [ # # ]: 0 : if (err < 0) {
1272 : 0 : DRV_LOG(INFO, "Failed to get netlink port state: %s",
1273 : : strerror(rte_errno));
1274 : 0 : err = -rte_errno;
1275 : 0 : goto error;
1276 : : }
1277 : 0 : port_attr.state = (enum ibv_port_state)err;
1278 : : }
1279 [ # # ]: 0 : if (port_attr.state != IBV_PORT_ACTIVE)
1280 : 0 : DRV_LOG(INFO, "port is not active: \"%s\" (%d)",
1281 : : mlx5_glue->port_state_str(port_attr.state),
1282 : : port_attr.state);
1283 : : /* Allocate private eth device data. */
1284 : 0 : priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1285 : : sizeof(*priv),
1286 : : RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1287 [ # # ]: 0 : if (priv == NULL) {
1288 : 0 : DRV_LOG(ERR, "priv allocation failure");
1289 : : err = ENOMEM;
1290 : 0 : goto error;
1291 : : }
1292 : : /*
1293 : : * When the user configures a remote PD and CTX and the device creates RxQs
1294 : : * through DevX, external RxQs are both supported and requested.
1295 : : */
1296 [ # # # # : 0 : if (mlx5_imported_pd_and_ctx(sh->cdev) && mlx5_devx_obj_ops_en(sh)) {
# # ]
1297 : 0 : priv->ext_rxqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1298 : : sizeof(struct mlx5_external_q) *
1299 : : MLX5_MAX_EXT_RX_QUEUES, 0,
1300 : : SOCKET_ID_ANY);
1301 [ # # ]: 0 : if (priv->ext_rxqs == NULL) {
1302 : 0 : DRV_LOG(ERR, "Fail to allocate external RxQ array.");
1303 : : err = ENOMEM;
1304 : 0 : goto error;
1305 : : }
1306 : 0 : priv->ext_txqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1307 : : sizeof(struct mlx5_external_q) *
1308 : : MLX5_MAX_EXT_TX_QUEUES, 0,
1309 : : SOCKET_ID_ANY);
1310 [ # # ]: 0 : if (priv->ext_txqs == NULL) {
1311 : 0 : DRV_LOG(ERR, "Fail to allocate external TxQ array.");
1312 : : err = ENOMEM;
1313 : 0 : goto error;
1314 : : }
1315 : 0 : DRV_LOG(DEBUG, "External queue is supported.");
1316 : : }
1317 : 0 : priv->sh = sh;
1318 : 0 : priv->dev_port = spawn->phys_port;
1319 : 0 : priv->pci_dev = spawn->pci_dev;
1320 : 0 : priv->mtu = RTE_ETHER_MTU;
1321 : : /* Some internal functions rely on Netlink sockets, open them now. */
1322 : 0 : priv->nl_socket_rdma = nl_rdma;
1323 : 0 : priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE, 0);
1324 : 0 : priv->representor = !!switch_info->representor;
1325 : 0 : priv->master = !!switch_info->master;
1326 : 0 : priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
1327 : 0 : priv->vport_meta_tag = 0;
1328 : 0 : priv->vport_meta_mask = 0;
1329 : 0 : priv->pf_bond = spawn->pf_bond;
1330 : 0 : priv->mpesw_port = spawn->mpesw_port;
1331 : 0 : priv->mpesw_uplink = false;
1332 : 0 : priv->mpesw_owner = spawn->info.mpesw_owner;
1333 [ # # ]: 0 : if (mlx5_is_port_on_mpesw_device(priv))
1334 : 0 : priv->mpesw_uplink = (spawn->info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK);
1335 : :
1336 [ # # ]: 0 : DRV_LOG(DEBUG,
1337 : : "dev_port=%u bus=%s pci=%s master=%d representor=%d pf_bond=%d "
1338 : : "mpesw_port=%d mpesw_uplink=%d",
1339 : : priv->dev_port, dpdk_dev->bus->name,
1340 : : priv->pci_dev ? priv->pci_dev->name : "NONE",
1341 : : priv->master, priv->representor, priv->pf_bond,
1342 : : priv->mpesw_port, priv->mpesw_uplink);
1343 : :
1344 [ # # # # ]: 0 : if (mlx5_is_port_on_mpesw_device(priv) && priv->sh->config.dv_flow_en != 2) {
1345 : 0 : DRV_LOG(ERR, "MPESW device is supported only with HWS");
1346 : : err = ENOTSUP;
1347 : 0 : goto error;
1348 : : }
1349 : : /*
1350 : : * If we have an E-Switch, we should determine the vport attributes.
1351 : : * The E-Switch may use either the source vport field or the reg_c[0]
1352 : : * metadata register to match on the vport index. The engaged part of
1353 : : * the metadata register is defined by the mask.
1354 : : */
1355 [ # # ]: 0 : if (sh->esw_mode) {
1356 : 0 : err = mlx5_glue->devx_port_query(sh->cdev->ctx,
1357 : : spawn->phys_port,
1358 : : &vport_info);
1359 [ # # ]: 0 : if (err) {
1360 : 0 : DRV_LOG(WARNING,
1361 : : "Cannot query devx port %d on device %s",
1362 : : spawn->phys_port, spawn->phys_dev_name);
1363 : 0 : vport_info.query_flags = 0;
1364 : : }
1365 : : }
1366 [ # # ]: 0 : if (vport_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1367 : 0 : priv->vport_meta_tag = vport_info.vport_meta_tag;
1368 : 0 : priv->vport_meta_mask = vport_info.vport_meta_mask;
1369 [ # # ]: 0 : if (!priv->vport_meta_mask) {
1370 : 0 : DRV_LOG(ERR,
1371 : : "vport zero mask for port %d on bonding device %s",
1372 : : spawn->phys_port, spawn->phys_dev_name);
1373 : : err = ENOTSUP;
1374 : 0 : goto error;
1375 : : }
1376 [ # # ]: 0 : if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
1377 : 0 : DRV_LOG(ERR,
1378 : : "Invalid vport tag for port %d on bonding device %s",
1379 : : spawn->phys_port, spawn->phys_dev_name);
1380 : : err = ENOTSUP;
1381 : 0 : goto error;
1382 : : }
1383 : : }
1384 [ # # ]: 0 : if (vport_info.query_flags & MLX5_PORT_QUERY_VPORT) {
1385 : 0 : priv->vport_id = vport_info.vport_id;
1386 [ # # # # ]: 0 : } else if (spawn->pf_bond >= 0 && sh->esw_mode) {
1387 : 0 : DRV_LOG(ERR,
1388 : : "Cannot deduce vport index for port %d on bonding device %s",
1389 : : spawn->phys_port, spawn->phys_dev_name);
1390 : : err = ENOTSUP;
1391 : 0 : goto error;
1392 : : } else {
1393 : : /*
1394 : : * Deduce the vport index in a compatible way. Kernel/rdma_core
1395 : : * support only single E-Switch per PF configurations, and the
1396 : : * vport_id field contains the vport index for the associated VF,
1397 : : * which is deduced from the representor port name.
1398 : : * For example, suppose IB device port 10 has an attached
1399 : : * network device eth0, whose port name attribute is
1400 : : * pf0vf2; we can deduce the VF number as 2 and set the vport
1401 : : * index as 3 (2+1). This assignment scheme should be changed if
1402 : : * multiple E-Switch instances per PF configurations and/or PCI
1403 : : * subfunctions are added.
1404 : : */
1405 [ # # ]: 0 : priv->vport_id = switch_info->representor ?
1406 : 0 : switch_info->port_name + 1 : -1;
1407 : : }
1408 : 0 : priv->representor_id = mlx5_representor_id_encode(switch_info,
1409 : : eth_da->type);
1410 : : /*
1411 : : * Look for sibling devices in order to reuse their switch domain
1412 : : * if any, otherwise allocate one.
1413 : : */
1414 [ # # ]: 0 : MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
1415 : 0 : const struct mlx5_priv *opriv =
1416 : 0 : rte_eth_devices[port_id].data->dev_private;
1417 : :
1418 [ # # ]: 0 : if (!opriv ||
1419 [ # # ]: 0 : opriv->sh != priv->sh ||
1420 [ # # ]: 0 : opriv->domain_id ==
1421 : : RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
1422 : : continue;
1423 : 0 : priv->domain_id = opriv->domain_id;
1424 : 0 : DRV_LOG(DEBUG, "dev_port-%u inherit domain_id=%u\n",
1425 : : priv->dev_port, priv->domain_id);
1426 : 0 : break;
1427 : : }
1428 [ # # ]: 0 : if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1429 : 0 : err = rte_eth_switch_domain_alloc(&priv->domain_id);
1430 [ # # ]: 0 : if (err) {
1431 : 0 : err = rte_errno;
1432 : 0 : DRV_LOG(ERR, "unable to allocate switch domain: %s",
1433 : : strerror(rte_errno));
1434 : 0 : goto error;
1435 : : }
1436 : : own_domain_id = 1;
1437 : 0 : DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
1438 : : priv->dev_port, priv->domain_id);
1439 : : }
1440 [ # # ]: 0 : if (sh->cdev->config.devx) {
1441 : : struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
1442 : :
1443 : 0 : sh->steering_format_version = hca_attr->steering_format_version;
1444 : : #if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO_EXT)
1445 [ # # # # ]: 0 : if (hca_attr->qos.sup && hca_attr->qos.flow_meter_old &&
1446 : : sh->config.dv_flow_en) {
1447 [ # # ]: 0 : if (sh->registers.aso_reg != REG_NON) {
1448 : 0 : priv->mtr_en = 1;
1449 : 0 : priv->mtr_reg_share = hca_attr->qos.flow_meter;
1450 : : }
1451 : : }
1452 [ # # ]: 0 : if (hca_attr->qos.sup && hca_attr->qos.flow_meter_aso_sup) {
1453 : : uint32_t log_obj_size =
1454 : : rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
1455 : 0 : if (log_obj_size >=
1456 [ # # ]: 0 : hca_attr->qos.log_meter_aso_granularity &&
1457 : : log_obj_size <=
1458 [ # # ]: 0 : hca_attr->qos.log_meter_aso_max_alloc)
1459 : 0 : sh->meter_aso_en = 1;
1460 : : }
1461 [ # # ]: 0 : if (priv->mtr_en) {
1462 : 0 : err = mlx5_aso_flow_mtrs_mng_init(priv->sh);
1463 [ # # ]: 0 : if (err) {
1464 : 0 : err = -err;
1465 : 0 : goto error;
1466 : : }
1467 : : }
1468 [ # # ]: 0 : if (hca_attr->flow.tunnel_header_0_1)
1469 : 0 : sh->tunnel_header_0_1 = 1;
1470 [ # # ]: 0 : if (hca_attr->flow.tunnel_header_2_3)
1471 : 0 : sh->tunnel_header_2_3 = 1;
1472 : : #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO_EXT */
1473 : : #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
1474 [ # # # # ]: 0 : if (hca_attr->flow_hit_aso && sh->registers.aso_reg == REG_C_3) {
1475 : 0 : sh->flow_hit_aso_en = 1;
1476 : 0 : err = mlx5_flow_aso_age_mng_init(sh);
1477 [ # # ]: 0 : if (err) {
1478 : 0 : err = -err;
1479 : 0 : goto error;
1480 : : }
1481 : 0 : DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
1482 : : }
1483 : : #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
1484 : : #if defined (HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
1485 : : defined (HAVE_MLX5_DR_ACTION_ASO_CT)
1486 : : /* HWS creates the CT ASO SQ based on the HWS configured queue number. */
1487 [ # # # # ]: 0 : if (sh->config.dv_flow_en != 2 &&
1488 [ # # ]: 0 : hca_attr->ct_offload && sh->registers.aso_reg == REG_C_3) {
1489 : 0 : err = mlx5_flow_aso_ct_mng_init(sh);
1490 [ # # ]: 0 : if (err) {
1491 : 0 : err = -err;
1492 : 0 : goto error;
1493 : : }
1494 : 0 : DRV_LOG(DEBUG, "CT ASO is supported.");
1495 : 0 : sh->ct_aso_en = 1;
1496 : : }
1497 : : #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
1498 : : #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
1499 [ # # # # ]: 0 : if (hca_attr->log_max_ft_sampler_num > 0 &&
1500 : : sh->config.dv_flow_en) {
1501 : 0 : priv->sampler_en = 1;
1502 : 0 : DRV_LOG(DEBUG, "Sampler enabled!");
1503 : : } else {
1504 : 0 : priv->sampler_en = 0;
1505 [ # # ]: 0 : if (!hca_attr->log_max_ft_sampler_num)
1506 : 0 : DRV_LOG(WARNING,
1507 : : "No available register for sampler.");
1508 : : else
1509 : 0 : DRV_LOG(DEBUG, "DV flow is not supported!");
1510 : : }
1511 : : #endif
1512 [ # # ]: 0 : if (hca_attr->lag_rx_port_affinity) {
1513 : 0 : sh->lag_rx_port_affinity_en = 1;
1514 : 0 : DRV_LOG(DEBUG, "LAG Rx Port Affinity enabled");
1515 : : }
1516 : 0 : priv->num_lag_ports = hca_attr->num_lag_ports;
1517 : 0 : DRV_LOG(DEBUG, "The number of lag ports is %d", priv->num_lag_ports);
1518 : : }
1519 : : /* Process parameters and store port configuration on priv structure. */
1520 : 0 : err = mlx5_port_args_config(priv, mkvlist, &priv->config);
1521 [ # # ]: 0 : if (err) {
1522 : 0 : err = rte_errno;
1523 : 0 : DRV_LOG(ERR, "Failed to process port configure: %s",
1524 : : strerror(rte_errno));
1525 : 0 : goto error;
1526 : : }
1527 : 0 : eth_dev = rte_eth_dev_allocate(name);
1528 [ # # ]: 0 : if (eth_dev == NULL) {
1529 : 0 : DRV_LOG(ERR, "cannot allocate rte ethdev");
1530 : : err = ENOMEM;
1531 : 0 : goto error;
1532 : : }
1533 [ # # ]: 0 : if (priv->representor) {
1534 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
1535 : 0 : eth_dev->data->representor_id = priv->representor_id;
1536 [ # # ]: 0 : MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
1537 : 0 : struct mlx5_priv *opriv =
1538 : 0 : rte_eth_devices[port_id].data->dev_private;
1539 [ # # # # ]: 0 : if (opriv &&
1540 : 0 : opriv->master &&
1541 [ # # ]: 0 : opriv->domain_id == priv->domain_id &&
1542 [ # # ]: 0 : opriv->sh == priv->sh) {
1543 : 0 : eth_dev->data->backer_port_id = port_id;
1544 : 0 : break;
1545 : : }
1546 : : }
1547 [ # # ]: 0 : if (port_id >= RTE_MAX_ETHPORTS)
1548 : 0 : eth_dev->data->backer_port_id = eth_dev->data->port_id;
1549 : : }
1550 : 0 : priv->mp_id.port_id = eth_dev->data->port_id;
1551 : 0 : strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
1552 : : /*
1553 : : * Store associated network device interface index. This index
1554 : : * is permanent throughout the lifetime of the device, so we may store
1555 : : * the ifindex here and use the cached value later.
1556 : : */
1557 : : MLX5_ASSERT(spawn->ifindex);
1558 : 0 : priv->if_index = spawn->ifindex;
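 : : /* The LAG affinity index reflects this port's order among ports sharing the device context. */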
1559 : 0 : priv->lag_affinity_idx = sh->refcnt - 1;
1560 : 0 : eth_dev->data->dev_private = priv;
1561 : 0 : priv->dev_data = eth_dev->data;
1562 : 0 : eth_dev->data->mac_addrs = priv->mac;
1563 : 0 : eth_dev->device = dpdk_dev;
1564 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1565 : : /* Configure the first MAC address by default. */
1566 [ # # ]: 0 : if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1567 : 0 : DRV_LOG(ERR,
1568 : : "port %u cannot get MAC address, is mlx5_en"
1569 : : " loaded? (errno: %s)",
1570 : : eth_dev->data->port_id, strerror(rte_errno));
1571 : : err = ENODEV;
1572 : 0 : goto error;
1573 : : }
1574 : 0 : DRV_LOG(INFO,
1575 : : "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
1576 : : eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
1577 : : #ifdef RTE_LIBRTE_MLX5_DEBUG
1578 : : {
1579 : : char ifname[MLX5_NAMESIZE];
1580 : :
1581 : : if (mlx5_get_ifname(eth_dev, &ifname) == 0)
1582 : : DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1583 : : eth_dev->data->port_id, ifname);
1584 : : else
1585 : : DRV_LOG(DEBUG, "port %u ifname is unknown",
1586 : : eth_dev->data->port_id);
1587 : : }
1588 : : #endif
1589 : : /* Get actual MTU if possible. */
1590 : 0 : err = mlx5_get_mtu(eth_dev, &priv->mtu);
1591 [ # # ]: 0 : if (err) {
1592 : 0 : err = rte_errno;
1593 : 0 : goto error;
1594 : : }
1595 : 0 : DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1596 : : priv->mtu);
1597 : : /* Initialize burst functions to prevent crashes before link-up. */
1598 : 0 : eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
1599 : 0 : eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
1600 : 0 : eth_dev->dev_ops = &mlx5_dev_ops;
1601 : 0 : eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1602 : 0 : eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1603 : 0 : eth_dev->rx_queue_count = mlx5_rx_queue_count;
1604 : : /* Register MAC address. */
1605 : 0 : claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
1606 : : /* Sync MAC addresses for PF, or for VF/SF when vf_nl_en is true. */
1607 [ # # # # ]: 0 : if ((!sh->dev_cap.vf && !sh->dev_cap.sf) || sh->config.vf_nl_en)
1608 : 0 : mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1609 : : mlx5_ifindex(eth_dev),
1610 : 0 : eth_dev->data->mac_addrs,
1611 : : MLX5_MAX_MAC_ADDRESSES);
1612 [ # # ]: 0 : priv->ctrl_flows = 0;
1613 : : rte_spinlock_init(&priv->flow_list_lock);
1614 : 0 : TAILQ_INIT(&priv->flow_meters);
1615 [ # # ]: 0 : if (priv->mtr_en) {
1616 : 0 : priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
1617 [ # # ]: 0 : if (!priv->mtr_profile_tbl)
1618 : 0 : goto error;
1619 : : }
1620 : : /* Bring Ethernet device up. */
1621 : 0 : DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1622 : : eth_dev->data->port_id);
1623 : : /* Read link status in case it is up and there will be no event. */
1624 : 0 : mlx5_link_update(eth_dev, 0);
1625 : : /* Watch LSC interrupts between port probe and port start. */
1626 : 0 : priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
1627 : 0 : eth_dev->data->port_id;
1628 : 0 : mlx5_set_link_up(eth_dev);
1629 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
1630 : 0 : icfg[i].release_mem_en = !!sh->config.reclaim_mode;
1631 [ # # ]: 0 : if (sh->config.reclaim_mode)
1632 : 0 : icfg[i].per_core_cache = 0;
1633 : : #ifdef HAVE_MLX5_HWS_SUPPORT
1634 [ # # ]: 0 : if (priv->sh->config.dv_flow_en == 2)
1635 : 0 : icfg[i].size = sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws);
1636 : : #endif
1637 : 0 : priv->flows[i] = mlx5_ipool_create(&icfg[i]);
1638 [ # # ]: 0 : if (!priv->flows[i])
1639 : 0 : goto error;
1640 : : }
1641 : : /* Create context for virtual machine VLAN workaround. */
1642 [ # # ]: 0 : priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
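 : : /* Prefer DevX queue objects when available; otherwise fall back to Verbs objects. */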
1643 [ # # ]: 0 : if (mlx5_devx_obj_ops_en(sh)) {
1644 : 0 : priv->obj_ops = devx_obj_ops;
1645 : 0 : mlx5_queue_counter_id_prepare(eth_dev);
1646 : 0 : priv->obj_ops.lb_dummy_queue_create =
1647 : : mlx5_rxq_ibv_obj_dummy_lb_create;
1648 : 0 : priv->obj_ops.lb_dummy_queue_release =
1649 : : mlx5_rxq_ibv_obj_dummy_lb_release;
1650 [ # # ]: 0 : } else if (spawn->max_port > UINT8_MAX) {
1651 : : /* Verbs can't support ports larger than 255 by design. */
1652 : 0 : DRV_LOG(ERR, "must enable DV and ESW when RDMA link ports > 255");
1653 : : err = ENOTSUP;
1654 : 0 : goto error;
1655 : : } else {
1656 : 0 : priv->obj_ops = ibv_obj_ops;
1657 : : }
1658 [ # # ]: 0 : if (sh->config.tx_pp &&
1659 [ # # ]: 0 : priv->obj_ops.txq_obj_new != mlx5_txq_devx_obj_new) {
1660 : : /*
1661 : : * HAVE_MLX5DV_DEVX_UAR_OFFSET is required to support
1662 : : * packet pacing and already checked above.
1663 : : * Hence, we should only make sure the SQs will be created
1664 : : * with DevX, not with Verbs.
1665 : : * Verbs allocates the SQ UAR on its own and it can't be shared
1666 : : * with Clock Queue UAR as required for Tx scheduling.
1667 : : */
1668 : 0 : DRV_LOG(ERR, "Verbs SQs, UAR can't be shared as required for packet pacing");
1669 : : err = ENODEV;
1670 : 0 : goto error;
1671 : : }
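 : : /* Create the default drop action and the shared list of hash Rx queues. */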
1672 : 0 : priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
1673 [ # # ]: 0 : if (!priv->drop_queue.hrxq)
1674 : 0 : goto error;
1675 : 0 : priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
1676 : : mlx5_hrxq_create_cb,
1677 : : mlx5_hrxq_match_cb,
1678 : : mlx5_hrxq_remove_cb,
1679 : : mlx5_hrxq_clone_cb,
1680 : : mlx5_hrxq_clone_free_cb);
1681 [ # # ]: 0 : if (!priv->hrxqs)
1682 : 0 : goto error;
1683 : 0 : mlx5_set_metadata_mask(eth_dev);
1684 [ # # ]: 0 : if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1685 [ # # ]: 0 : !priv->sh->dv_regc0_mask) {
1686 : 0 : DRV_LOG(ERR, "metadata mode %u is not supported "
1687 : : "(no metadata reg_c[0] is available)",
1688 : : sh->config.dv_xmeta_en);
1689 : : err = ENOTSUP;
1690 : 0 : goto error;
1691 : : }
1692 : : rte_rwlock_init(&priv->ind_tbls_lock);
1693 [ # # ]: 0 : if (sh->config.dv_flow_en) {
1694 : 0 : err = mlx5_alloc_shared_dr(eth_dev);
1695 [ # # ]: 0 : if (err)
1696 : 0 : goto error;
1697 [ # # ]: 0 : if (mlx5_flex_item_port_init(eth_dev) < 0)
1698 : 0 : goto error;
1699 : : }
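 : : /* Decide once per physical device whether the outer IPv6 traffic class can be modified. */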
1700 [ # # ]: 0 : if (sh->phdev->config.ipv6_tc_fallback == MLX5_IPV6_TC_UNKNOWN) {
1701 : 0 : sh->phdev->config.ipv6_tc_fallback = MLX5_IPV6_TC_OK;
1702 [ # # ]: 0 : if (!sh->cdev->config.hca_attr.modify_outer_ipv6_traffic_class ||
1703 [ # # # # ]: 0 : (sh->config.dv_flow_en == 1 && mlx5_flow_discover_ipv6_tc_support(eth_dev)))
1704 : 0 : sh->phdev->config.ipv6_tc_fallback = MLX5_IPV6_TC_FALLBACK;
1705 : : }
1706 : : rte_spinlock_init(&priv->hw_ctrl_lock);
1707 : 0 : LIST_INIT(&priv->hw_ctrl_flows);
1708 : 0 : LIST_INIT(&priv->hw_ext_ctrl_flows);
1709 [ # # ]: 0 : if (priv->sh->config.dv_flow_en == 2) {
1710 : : #ifdef HAVE_MLX5_HWS_SUPPORT
1711 : : /*
1712 : : * The unified FDB flag is only needed for actions created on the transfer
1713 : : * proxy port. It is not needed in the following cases:
1714 : : * 1. NIC PF / VF / SF
1715 : : * 2. in Verbs or DV/DR mode
1716 : : * 3. with unsupported FW
1717 : : * 4. all representors in HWS
1718 : : */
1719 [ # # # # ]: 0 : priv->unified_fdb_en = !!priv->master && sh->cdev->config.hca_attr.fdb_unified_en;
1720 : : /* Jump FDB Rx works only with unified FDB enabled. */
1721 [ # # ]: 0 : if (priv->unified_fdb_en)
1722 : 0 : priv->jump_fdb_rx_en = sh->cdev->config.hca_attr.jump_fdb_rx_en;
1723 [ # # # # ]: 0 : DRV_LOG(DEBUG, "port %u: unified FDB %s enabled, jump_fdb_rx %s enabled.",
1724 : : eth_dev->data->port_id,
1725 : : priv->unified_fdb_en ? "is" : "isn't",
1726 : : priv->jump_fdb_rx_en ? "is" : "isn't");
1727 [ # # ]: 0 : if (priv->sh->config.dv_esw_en) {
1728 : : uint32_t usable_bits;
1729 : : uint32_t required_bits;
1730 : :
1731 [ # # ]: 0 : if (priv->sh->dv_regc0_mask == UINT32_MAX) {
1732 : 0 : DRV_LOG(ERR, "E-Switch port metadata is required when using HWS "
1733 : : "but it is disabled (configure it through devlink)");
1734 : : err = ENOTSUP;
1735 : 0 : goto error;
1736 : : }
1737 [ # # ]: 0 : if (priv->sh->dv_regc0_mask == 0) {
1738 : 0 : DRV_LOG(ERR, "E-Switch with HWS is not supported "
1739 : : "(no available bits in reg_c[0])");
1740 : : err = ENOTSUP;
1741 : 0 : goto error;
1742 : : }
1743 : : usable_bits = rte_popcount32(priv->sh->dv_regc0_mask);
1744 : 0 : required_bits = rte_popcount32(priv->vport_meta_mask);
1745 [ # # ]: 0 : if (usable_bits < required_bits) {
1746 : 0 : DRV_LOG(ERR, "Not enough bits available in reg_c[0] to provide "
1747 : : "representor matching.");
1748 : : err = ENOTSUP;
1749 : 0 : goto error;
1750 : : }
1751 : : }
1752 [ # # ]: 0 : if (priv->vport_meta_mask)
1753 : 0 : flow_hw_set_port_info(eth_dev);
1754 [ # # ]: 0 : if (priv->sh->config.dv_esw_en &&
1755 [ # # # # ]: 0 : priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1756 : : priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS) {
1757 : 0 : DRV_LOG(ERR,
1758 : : "metadata mode %u is not supported in HWS eswitch mode",
1759 : : priv->sh->config.dv_xmeta_en);
1760 : : err = ENOTSUP;
1761 : 0 : goto error;
1762 : : }
1763 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en &&
1764 : 0 : flow_hw_create_vport_action(eth_dev)) {
1765 : 0 : DRV_LOG(ERR, "port %u failed to create vport action",
1766 : : eth_dev->data->port_id);
1767 : : err = EINVAL;
1768 : 0 : goto error;
1769 : : }
1770 : : /*
1771 : : * If representor matching is disabled, PMD cannot create default flow rules
1772 : : * to receive traffic for all ports, since implicit source port match is not added.
1773 : : * Isolated mode is forced.
1774 : : */
1775 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en && !priv->sh->config.repr_matching) {
1776 : 0 : err = mlx5_flow_isolate(eth_dev, 1, NULL);
1777 [ # # ]: 0 : if (err < 0) {
1778 : 0 : err = -err;
1779 : 0 : goto error;
1780 : : }
1781 : 0 : DRV_LOG(WARNING, "port %u ingress traffic is restricted to defined "
1782 : : "flow rules (isolated mode) since representor "
1783 : : "matching is disabled",
1784 : : eth_dev->data->port_id);
1785 : : }
1786 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
1787 : 0 : return eth_dev;
1788 : : #else
1789 : : DRV_LOG(ERR, "DV support is missing for HWS.");
1790 : : goto error;
1791 : : #endif
1792 : : }
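 : : /* Discover the Verbs flow priority range once per shared context and cache the result. */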
1793 [ # # ]: 0 : if (!priv->sh->flow_priority_check_flag) {
1794 : : /* Detect the number of supported Verbs flow priorities. */
1795 : 0 : err = mlx5_flow_discover_priorities(eth_dev);
1796 : 0 : priv->sh->flow_max_priority = err;
1797 : 0 : priv->sh->flow_priority_check_flag = 1;
1798 : : } else {
1799 : 0 : err = priv->sh->flow_max_priority;
1800 : : }
1801 [ # # ]: 0 : if (err < 0) {
1802 : 0 : err = -err;
1803 : 0 : goto error;
1804 : : }
1805 : : rte_spinlock_init(&priv->shared_act_sl);
1806 : 0 : mlx5_flow_counter_mode_config(eth_dev);
1807 : 0 : mlx5_flow_drop_action_config(eth_dev);
1808 [ # # ]: 0 : if (sh->config.dv_flow_en)
1809 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
1810 : : return eth_dev;
1811 : 0 : error:
1812 [ # # ]: 0 : if (priv) {
1813 : 0 : priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
1814 : : RTE_MAX_ETHPORTS;
1815 : 0 : rte_io_wmb();
1816 : : #ifdef HAVE_MLX5_HWS_SUPPORT
1817 [ # # ]: 0 : if (eth_dev &&
1818 [ # # ]: 0 : priv->sh &&
1819 [ # # ]: 0 : priv->sh->config.dv_flow_en == 2 &&
1820 : : priv->sh->config.dv_esw_en)
1821 : 0 : flow_hw_destroy_vport_action(eth_dev);
1822 : : #endif
1823 [ # # ]: 0 : if (priv->sh)
1824 : 0 : mlx5_os_free_shared_dr(priv);
1825 [ # # ]: 0 : if (priv->nl_socket_route >= 0)
1826 : 0 : close(priv->nl_socket_route);
1827 [ # # ]: 0 : if (priv->vmwa_context)
1828 : 0 : mlx5_vlan_vmwa_exit(priv->vmwa_context);
1829 [ # # # # ]: 0 : if (eth_dev && priv->drop_queue.hrxq)
1830 : 0 : mlx5_drop_action_destroy(eth_dev);
1831 [ # # ]: 0 : if (priv->mtr_profile_tbl)
1832 : 0 : mlx5_l3t_destroy(priv->mtr_profile_tbl);
1833 [ # # ]: 0 : if (own_domain_id)
1834 : 0 : claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1835 [ # # ]: 0 : if (priv->hrxqs)
1836 : 0 : mlx5_list_destroy(priv->hrxqs);
1837 [ # # # # ]: 0 : if (eth_dev && priv->flex_item_map)
1838 : 0 : mlx5_flex_item_port_cleanup(eth_dev);
1839 : 0 : mlx5_free(priv->ext_rxqs);
1840 : 0 : mlx5_free(priv->ext_txqs);
1841 : 0 : mlx5_free(priv);
1842 [ # # ]: 0 : if (eth_dev != NULL)
1843 : 0 : eth_dev->data->dev_private = NULL;
1844 : : }
1845 [ # # ]: 0 : if (eth_dev != NULL) {
1846 : : /* mac_addrs must not be freed alone because it is part of
1847 : : * dev_private.
1848 : : */
1849 : 0 : eth_dev->data->mac_addrs = NULL;
1850 : 0 : rte_eth_dev_release_port(eth_dev);
1851 : : }
1852 : : if (sh)
1853 : 0 : mlx5_free_shared_dev_ctx(sh);
1854 [ # # ]: 0 : if (nl_rdma >= 0)
1855 : 0 : close(nl_rdma);
1856 : : MLX5_ASSERT(err > 0);
1857 : 0 : rte_errno = err;
1858 : 0 : return NULL;
1859 : : }
1860 : :
1861 : : /**
1862 : : * Comparison callback to sort device data.
1863 : : *
1864 : : * This is meant to be used with qsort().
1865 : : *
1866 : : * @param a[in]
1867 : : * Pointer to pointer to first data object.
1868 : : * @param b[in]
1869 : : * Pointer to pointer to second data object.
1870 : : *
1871 : : * @return
1872 : : * 0 if both objects are equal, less than 0 if the first argument is less
1873 : : * than the second, greater than 0 otherwise.
1874 : : */
1875 : : static int
1876 : 0 : mlx5_dev_spawn_data_cmp(const void *a, const void *b)
1877 : : {
1878 : : const struct mlx5_switch_info *si_a =
1879 : : &((const struct mlx5_dev_spawn_data *)a)->info;
1880 : : const struct mlx5_switch_info *si_b =
1881 : : &((const struct mlx5_dev_spawn_data *)b)->info;
1882 : 0 : int uplink_a = si_a->name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
1883 : 0 : int uplink_b = si_b->name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
1884 : : int ret;
1885 : :
1886 : : /* Uplink ports first. */
1887 : 0 : ret = uplink_b - uplink_a;
1888 [ # # ]: 0 : if (ret)
1889 : : return ret;
1890 : : /* Then master devices. */
1891 : 0 : ret = si_b->master - si_a->master;
1892 [ # # ]: 0 : if (ret)
1893 : : return ret;
1894 : : /* Then representor devices. */
1895 : 0 : ret = si_b->representor - si_a->representor;
1896 [ # # ]: 0 : if (ret)
1897 : : return ret;
1898 : : /* Unidentified devices come last in no specific order. */
1899 [ # # ]: 0 : if (!si_a->representor)
1900 : : return 0;
1901 : : /* Order representors by name. */
1902 : 0 : return si_a->port_name - si_b->port_name;
1903 : : }
1904 : :
1905 : : /**
1906 : : * Match PCI information for possible slaves of bonding device.
1907 : : *
1908 : : * @param[in] ibdev_name
1909 : : * Name of Infiniband device.
1910 : : * @param[in] pci_dev
1911 : : * Pointer to primary PCI address structure to match.
1912 : : * @param[in] nl_rdma
1913 : : * Netlink RDMA group socket handle.
1914 : : * @param[in] owner
1915 : : * Representor owner PF index.
1916 : : * @param[in] dev_info
1917 : : * Cached mlx5 device information.
1918 : : * @param[out] bond_info
1919 : : * Pointer to bonding information.
1920 : : *
1921 : : * @return
1922 : : * A negative value if no bonding device is found, otherwise
1923 : : * the non-negative index of the slave PF in the bonding device.
1924 : : */
1925 : : static int
1926 : 0 : mlx5_device_bond_pci_match(const char *ibdev_name,
1927 : : const struct rte_pci_addr *pci_dev,
1928 : : int nl_rdma, uint16_t owner,
1929 : : struct mlx5_dev_info *dev_info,
1930 : : struct mlx5_bond_info *bond_info)
1931 : : {
1932 : : char ifname[IF_NAMESIZE + 1];
1933 : : unsigned int ifindex;
1934 : : unsigned int np, i;
1935 : : FILE *bond_file = NULL, *file;
1936 : : int pf = -1;
1937 : : int ret;
1938 : 0 : uint8_t cur_guid[32] = {0};
1939 [ # # ]: 0 : uint8_t guid[32] = {0};
1940 : :
1941 : : /*
1942 : : * Try to get the master device name. If something goes wrong, assume
1943 : : * a lack of kernel support and no bonding devices.
1944 : : */
1945 : : memset(bond_info, 0, sizeof(*bond_info));
1946 [ # # ]: 0 : if (nl_rdma < 0)
1947 : : return -1;
1948 [ # # ]: 0 : if (!strstr(ibdev_name, "bond"))
1949 : : return -1;
1950 : 0 : np = mlx5_nl_portnum(nl_rdma, ibdev_name, dev_info);
1951 [ # # ]: 0 : if (!np)
1952 : : return -1;
1953 [ # # ]: 0 : if (mlx5_get_device_guid(pci_dev, cur_guid, sizeof(cur_guid)) < 0)
1954 : : return -1;
1955 : : /*
1956 : : * The master device might not be on the predefined port (being on
1957 : : * port index 1 is not guaranteed), so we have to scan all Infiniband
1958 : : * device ports to find the master.
1959 : : */
1960 [ # # ]: 0 : for (i = 1; i <= np; ++i) {
1961 : : /* Check whether Infiniband port is populated. */
1962 : 0 : ifindex = mlx5_nl_ifindex(nl_rdma, ibdev_name, i, dev_info);
1963 [ # # ]: 0 : if (!ifindex)
1964 : 0 : continue;
1965 [ # # ]: 0 : if (!if_indextoname(ifindex, ifname))
1966 : 0 : continue;
1967 : : /* Try to read bonding slave names from sysfs. */
1968 : 0 : MKSTR(slaves,
1969 : : "/sys/class/net/%s/master/bonding/slaves", ifname);
1970 : 0 : bond_file = fopen(slaves, "r");
1971 [ # # ]: 0 : if (bond_file)
1972 : : break;
1973 : : }
1974 [ # # ]: 0 : if (!bond_file)
1975 : : return -1;
1976 : : /* Use safe format to check maximal buffer length. */
1977 : : MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
1978 [ # # ]: 0 : while (fscanf(bond_file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
1979 : : char tmp_str[IF_NAMESIZE + 32];
1980 : : struct rte_pci_addr pci_addr;
1981 : : struct mlx5_switch_info info;
1982 : : int ret;
1983 : :
1984 : : /* Process slave interface names in the loop. */
1985 : : snprintf(tmp_str, sizeof(tmp_str),
1986 : : "/sys/class/net/%s", ifname);
1987 [ # # ]: 0 : if (mlx5_get_pci_addr(tmp_str, &pci_addr)) {
1988 : 0 : DRV_LOG(WARNING,
1989 : : "Cannot get PCI address for netdev \"%s\".",
1990 : : ifname);
1991 : 0 : continue;
1992 : : }
1993 : : /* Slave interface PCI address match found. */
1994 : : snprintf(tmp_str, sizeof(tmp_str),
1995 : : "/sys/class/net/%s/phys_port_name", ifname);
1996 : 0 : file = fopen(tmp_str, "rb");
1997 [ # # ]: 0 : if (!file)
1998 : : break;
1999 : 0 : info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
2000 [ # # ]: 0 : if (fscanf(file, "%32s", tmp_str) == 1) {
2001 : 0 : mlx5_translate_port_name(tmp_str, &info);
2002 : 0 : fclose(file);
2003 : : } else {
2004 : 0 : fclose(file);
2005 : 0 : break;
2006 : : }
2007 : : /* Only process PF ports. */
2008 [ # # ]: 0 : if (info.name_type != MLX5_PHYS_PORT_NAME_TYPE_LEGACY &&
2009 : : info.name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
2010 : 0 : continue;
2011 : : /* Check max bonding member. */
2012 [ # # ]: 0 : if (info.port_name >= MLX5_BOND_MAX_PORTS) {
2013 : 0 : DRV_LOG(WARNING, "bonding index out of range, "
2014 : : "please increase MLX5_BOND_MAX_PORTS: %s",
2015 : : tmp_str);
2016 : 0 : break;
2017 : : }
2018 : : /* Get ifindex. */
2019 : : snprintf(tmp_str, sizeof(tmp_str),
2020 : : "/sys/class/net/%s/ifindex", ifname);
2021 : 0 : file = fopen(tmp_str, "rb");
2022 [ # # ]: 0 : if (!file)
2023 : : break;
2024 : 0 : ret = fscanf(file, "%u", &ifindex);
2025 : 0 : fclose(file);
2026 [ # # ]: 0 : if (ret != 1)
2027 : : break;
2028 : : /* Save bonding info. */
2029 : 0 : snprintf(bond_info->ports[info.port_name].ifname,
2030 : : sizeof(bond_info->ports[0].ifname), "%s", ifname);
2031 : 0 : bond_info->ports[info.port_name].pci_addr = pci_addr;
2032 : 0 : bond_info->ports[info.port_name].ifindex = ifindex;
2033 : 0 : bond_info->n_port++;
2034 : : /*
2035 : : * Under socket direct mode, bonding will use
2036 : : * system_image_guid as identification.
2037 : : * After OFED 5.4, guid is readable (ret >= 0) under sysfs.
2038 : : * All bonding members should have the same guid even if driver
2039 : : * is using PCIe BDF.
2040 : : */
2041 : 0 : ret = mlx5_get_device_guid(&pci_addr, guid, sizeof(guid));
2042 [ # # ]: 0 : if (ret < 0)
2043 : : break;
2044 [ # # ]: 0 : else if (ret > 0) {
2045 [ # # ]: 0 : if (!memcmp(guid, cur_guid, sizeof(guid)) &&
2046 [ # # # # ]: 0 : owner == info.port_name &&
2047 [ # # ]: 0 : (owner != 0 || (owner == 0 &&
2048 : 0 : !rte_pci_addr_cmp(pci_dev, &pci_addr))))
2049 : 0 : pf = info.port_name;
2050 [ # # ]: 0 : } else if (pci_dev->domain == pci_addr.domain &&
2051 [ # # ]: 0 : pci_dev->bus == pci_addr.bus &&
2052 : 0 : pci_dev->devid == pci_addr.devid &&
2053 [ # # ]: 0 : ((pci_dev->function == 0 &&
2054 [ # # ]: 0 : pci_dev->function + owner == pci_addr.function) ||
2055 [ # # ]: 0 : (pci_dev->function == owner &&
2056 [ # # ]: 0 : pci_addr.function == owner)))
2057 : 0 : pf = info.port_name;
2058 : : }
2059 : 0 : fclose(bond_file);
2060 [ # # ]: 0 : if (pf >= 0) {
2061 : : /* Get bond interface info */
2062 : 0 : ret = mlx5_sysfs_bond_info(ifindex, &bond_info->ifindex,
2063 : 0 : bond_info->ifname);
2064 [ # # ]: 0 : if (ret)
2065 : 0 : DRV_LOG(ERR, "unable to get bond info: %s",
2066 : : strerror(rte_errno));
2067 : : else
2068 : 0 : DRV_LOG(INFO, "PF device %u, bond device %u(%s)",
2069 : : ifindex, bond_info->ifindex, bond_info->ifname);
2070 : : }
2071 [ # # ]: 0 : if (owner == 0 && pf != 0) {
2072 : 0 : DRV_LOG(INFO, "PCIe instance " PCI_PRI_FMT " isn't bonding owner",
2073 : : pci_dev->domain, pci_dev->bus, pci_dev->devid,
2074 : : pci_dev->function);
2075 : : }
2076 : : return pf;
2077 : : }
2078 : :
2079 : : static int
2080 : 0 : mlx5_nl_esw_multiport_get(struct rte_pci_addr *pci_addr, int *enabled)
2081 : : {
2082 : 0 : char pci_addr_str[PCI_PRI_STR_SIZE] = { 0 };
2083 : : int nlsk_fd;
2084 : : int devlink_id;
2085 : : int ret;
2086 : :
2087 : : /* Provide a defined 'enabled' state in case of an error. */
2088 : 0 : *enabled = 0;
2089 : 0 : rte_pci_device_name(pci_addr, pci_addr_str, sizeof(pci_addr_str));
2090 : 0 : nlsk_fd = mlx5_nl_init(NETLINK_GENERIC, 0);
2091 [ # # ]: 0 : if (nlsk_fd < 0)
2092 : : return nlsk_fd;
2093 : 0 : devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
2094 [ # # ]: 0 : if (devlink_id < 0) {
2095 : : ret = devlink_id;
2096 : 0 : DRV_LOG(DEBUG, "Unable to get devlink family id for Multiport E-Switch checks "
2097 : : "by netlink, for PCI device %s", pci_addr_str);
2098 : 0 : goto close_nlsk_fd;
2099 : : }
2100 : 0 : ret = mlx5_nl_devlink_esw_multiport_get(nlsk_fd, devlink_id, pci_addr_str, enabled);
2101 [ # # ]: 0 : if (ret < 0)
2102 : 0 : DRV_LOG(DEBUG, "Unable to get Multiport E-Switch state by Netlink.");
2103 : 0 : close_nlsk_fd:
2104 : 0 : close(nlsk_fd);
2105 : 0 : return ret;
2106 : : }
2107 : :
2108 : : #define SYSFS_MPESW_PARAM_MAX_LEN 16
2109 : :
2110 : : static int
2111 : 0 : mlx5_sysfs_esw_multiport_get(struct ibv_device *ibv, struct rte_pci_addr *pci_addr, int *enabled,
2112 : : struct mlx5_dev_info *dev_info)
2113 : : {
2114 : : int nl_rdma;
2115 : : unsigned int n_ports;
2116 : : unsigned int i;
2117 : : int ret;
2118 : :
2119 : : /* Provide a defined 'enabled' state in case of an error. */
2120 : 0 : *enabled = 0;
2121 : 0 : nl_rdma = mlx5_nl_init(NETLINK_RDMA, 0);
2122 [ # # ]: 0 : if (nl_rdma < 0)
2123 : : return nl_rdma;
2124 : 0 : n_ports = mlx5_nl_portnum(nl_rdma, ibv->name, dev_info);
2125 [ # # ]: 0 : if (!n_ports) {
2126 : 0 : ret = -rte_errno;
2127 : 0 : goto close_nl_rdma;
2128 : : }
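 : : /* Scan the netdevs of all IB ports; the one matching the PCI address exposes lag_port_select_mode. */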
2129 [ # # ]: 0 : for (i = 1; i <= n_ports; ++i) {
2130 : : unsigned int ifindex;
2131 : : char ifname[IF_NAMESIZE + 1];
2132 : 0 : struct rte_pci_addr if_pci_addr = { 0 };
2133 : : char mpesw[SYSFS_MPESW_PARAM_MAX_LEN + 1];
2134 : : FILE *sysfs;
2135 : : int n;
2136 : :
2137 : 0 : ifindex = mlx5_nl_ifindex(nl_rdma, ibv->name, i, dev_info);
2138 [ # # ]: 0 : if (!ifindex)
2139 : 0 : continue;
2140 [ # # ]: 0 : if (!if_indextoname(ifindex, ifname))
2141 : 0 : continue;
2142 : 0 : MKSTR(sysfs_if_path, "/sys/class/net/%s", ifname);
2143 [ # # ]: 0 : if (mlx5_get_pci_addr(sysfs_if_path, &if_pci_addr))
2144 : 0 : continue;
2145 [ # # ]: 0 : if (pci_addr->domain != if_pci_addr.domain ||
2146 [ # # ]: 0 : pci_addr->bus != if_pci_addr.bus ||
2147 : 0 : pci_addr->devid != if_pci_addr.devid ||
2148 [ # # ]: 0 : pci_addr->function != if_pci_addr.function)
2149 : 0 : continue;
2150 : 0 : MKSTR(sysfs_mpesw_path,
2151 : : "/sys/class/net/%s/compat/devlink/lag_port_select_mode", ifname);
2152 : 0 : sysfs = fopen(sysfs_mpesw_path, "r");
2153 [ # # ]: 0 : if (!sysfs)
2154 : 0 : continue;
2155 : 0 : n = fscanf(sysfs, "%" RTE_STR(SYSFS_MPESW_PARAM_MAX_LEN) "s", mpesw);
2156 : 0 : fclose(sysfs);
2157 [ # # ]: 0 : if (n != 1)
2158 : 0 : continue;
2159 : : ret = 0;
2160 [ # # ]: 0 : if (strcmp(mpesw, "multiport_esw") == 0) {
2161 : 0 : *enabled = 1;
2162 : 0 : break;
2163 : : }
2164 : 0 : *enabled = 0;
2165 : 0 : break;
2166 : : }
2167 [ # # ]: 0 : if (i > n_ports) {
2168 : 0 : DRV_LOG(DEBUG, "Unable to get Multiport E-Switch state by sysfs.");
2169 : 0 : rte_errno = ENOENT;
2170 : : ret = -rte_errno;
2171 : : }
2172 : :
2173 : 0 : close_nl_rdma:
2174 : 0 : close(nl_rdma);
2175 : 0 : return ret;
2176 : : }
2177 : :
2178 : : static int
2179 : 0 : mlx5_is_mpesw_enabled(struct ibv_device *ibv, struct rte_pci_addr *ibv_pci_addr, int *enabled,
2180 : : struct mlx5_dev_info *dev_info)
2181 : : {
2182 : : /*
2183 : : * Try getting the Multiport E-Switch state through the netlink interface.
2184 : : * If unable, try the sysfs interface. If that fails as well,
2185 : : * assume that Multiport E-Switch is disabled and return an error.
2186 : : */
2187 [ # # # # ]: 0 : if (mlx5_nl_esw_multiport_get(ibv_pci_addr, enabled) >= 0 ||
2188 : 0 : mlx5_sysfs_esw_multiport_get(ibv, ibv_pci_addr, enabled, dev_info) >= 0)
2189 : 0 : return 0;
2190 : 0 : DRV_LOG(DEBUG, "Unable to check MPESW state for IB device %s "
2191 : : "(PCI: " PCI_PRI_FMT ")",
2192 : : ibv->name,
2193 : : ibv_pci_addr->domain, ibv_pci_addr->bus,
2194 : : ibv_pci_addr->devid, ibv_pci_addr->function);
2195 : 0 : *enabled = 0;
2196 : 0 : return -rte_errno;
2197 : : }
2198 : :
2199 : : static int
2200 : 0 : mlx5_device_mpesw_pci_match(struct ibv_device *ibv,
2201 : : const struct rte_pci_addr *owner_pci,
2202 : : int nl_rdma, struct mlx5_dev_info *dev_info)
2203 : : {
2204 : 0 : struct rte_pci_addr ibdev_pci_addr = { 0 };
2205 : 0 : char ifname[IF_NAMESIZE + 1] = { 0 };
2206 : : unsigned int ifindex;
2207 : : unsigned int np;
2208 : : unsigned int i;
2209 : 0 : int enabled = 0;
2210 : : int ret;
2211 : :
2212 : : /* Check if IB device's PCI address matches the probed PCI address. */
2213 [ # # ]: 0 : if (mlx5_get_pci_addr(ibv->ibdev_path, &ibdev_pci_addr)) {
2214 : 0 : DRV_LOG(DEBUG, "Skipping MPESW check for IB device %s since "
2215 : : "there is no underlying PCI device", ibv->name);
2216 : 0 : rte_errno = ENOENT;
2217 : 0 : return -rte_errno;
2218 : : }
2219 [ # # ]: 0 : if (ibdev_pci_addr.domain != owner_pci->domain ||
2220 [ # # ]: 0 : ibdev_pci_addr.bus != owner_pci->bus ||
2221 : 0 : ibdev_pci_addr.devid != owner_pci->devid ||
2222 [ # # ]: 0 : ibdev_pci_addr.function != owner_pci->function) {
2223 : : return -1;
2224 : : }
2225 : : /* Check if IB device has MPESW enabled. */
2226 [ # # ]: 0 : if (mlx5_is_mpesw_enabled(ibv, &ibdev_pci_addr, &enabled, dev_info))
2227 : : return -1;
2228 [ # # ]: 0 : if (!enabled)
2229 : : return -1;
2230 : : /* Iterate through IB ports to find MPESW master uplink port. */
2231 [ # # ]: 0 : if (nl_rdma < 0)
2232 : : return -1;
2233 : 0 : np = mlx5_nl_portnum(nl_rdma, ibv->name, dev_info);
2234 [ # # ]: 0 : if (!np)
2235 : : return -1;
2236 [ # # ]: 0 : for (i = 1; i <= np; ++i) {
2237 : 0 : struct rte_pci_addr pci_addr = { 0 };
2238 : : FILE *file;
2239 : : char port_name[IF_NAMESIZE + 1];
2240 : : struct mlx5_switch_info info;
2241 : :
2242 : : /* Check whether IB port has a corresponding netdev. */
2243 : 0 : ifindex = mlx5_nl_ifindex(nl_rdma, ibv->name, i, dev_info);
2244 [ # # ]: 0 : if (!ifindex)
2245 : 0 : continue;
2246 [ # # ]: 0 : if (!if_indextoname(ifindex, ifname))
2247 : 0 : continue;
2248 : : /* Read port name and determine its type. */
2249 : 0 : MKSTR(ifphysportname, "/sys/class/net/%s/phys_port_name", ifname);
2250 : 0 : file = fopen(ifphysportname, "rb");
2251 [ # # ]: 0 : if (!file)
2252 : 0 : continue;
2253 : 0 : ret = fscanf(file, "%16s", port_name);
2254 : 0 : fclose(file);
2255 [ # # ]: 0 : if (ret != 1)
2256 : 0 : continue;
2257 : : memset(&info, 0, sizeof(info));
2258 : 0 : mlx5_translate_port_name(port_name, &info);
2259 [ # # ]: 0 : if (info.name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
2260 : 0 : continue;
2261 : : /* Fetch PCI address of the device to which the netdev is bound. */
2262 : 0 : MKSTR(ifpath, "/sys/class/net/%s", ifname);
2263 [ # # ]: 0 : if (mlx5_get_pci_addr(ifpath, &pci_addr))
2264 : 0 : continue;
2265 [ # # ]: 0 : if (pci_addr.domain == ibdev_pci_addr.domain &&
2266 : : pci_addr.bus == ibdev_pci_addr.bus &&
2267 [ # # ]: 0 : pci_addr.devid == ibdev_pci_addr.devid &&
2268 : : pci_addr.function == ibdev_pci_addr.function) {
2269 : : MLX5_ASSERT(info.port_name >= 0);
2270 : 0 : return info.port_name;
2271 : : }
2272 : : }
2273 : : /* No matching MPESW uplink port was found. */
2274 : : return -1;
2275 : : }
2276 : :
2277 : : /**
2278 : : * Register a PCI device within bonding.
2279 : : *
2280 : : * This function spawns Ethernet devices out of a given PCI device and
2281 : : * bonding owner PF index.
2282 : : *
2283 : : * @param[in] cdev
2284 : : * Pointer to common mlx5 device structure.
2285 : : * @param[in] req_eth_da
2286 : : * Requested ethdev device argument.
2287 : : * @param[in] owner_id
2288 : : * Requested owner PF port ID within bonding device, default to 0.
2289 : : * @param[in, out] mkvlist
2290 : : * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
2291 : : *
2292 : : * @return
2293 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
2294 : : */
2295 : : static int
2296 : 0 : mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
2297 : : struct rte_eth_devargs *req_eth_da,
2298 : : uint16_t owner_id, struct mlx5_kvargs_ctrl *mkvlist)
2299 : 0 : {
2300 : : struct ibv_device **ibv_list;
2301 : : /*
2302 : : * Number of found IB devices matching the requested PCI BDF.
2303 : : * nd != 1 means there are multiple IB devices over the same
2304 : : * PCI device and we have representors and master.
2305 : : */
2306 : : unsigned int nd = 0;
2307 : : /*
2308 : : * Number of found IB device Ports. nd = 1 and np = 1..n means
2309 : : * we have a single multiport IB device, and there may be
2310 : : * representors attached to some of the found ports.
2311 : : */
2312 : : unsigned int np = 0;
2313 : : /*
2314 : : * Number of DPDK ethernet devices to Spawn - either over
2315 : : * multiple IB devices or multiple ports of single IB device.
2316 : : * Actually this is the number of iterations to spawn.
2317 : : */
2318 : : unsigned int ns = 0;
2319 : : /*
2320 : : * Bonding device
2321 : : * < 0 - no bonding device (single one)
2322 : : * >= 0 - bonding device (value is slave PF index)
2323 : : */
2324 : : int bd = -1;
2325 : : /*
2326 : : * Multiport E-Switch (MPESW) device:
2327 : : * < 0 - no MPESW device or could not determine if it is MPESW device,
2328 : : * >= 0 - MPESW device. Value is the port index of the MPESW owner.
2329 : : */
2330 : : int mpesw = MLX5_MPESW_PORT_INVALID;
2331 : 0 : struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
2332 : : struct mlx5_dev_spawn_data *list = NULL;
2333 : 0 : struct rte_eth_devargs eth_da = *req_eth_da;
2334 : 0 : struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
2335 : : struct mlx5_bond_info bond_info;
2336 : 0 : int ret = -1;
2337 : :
2338 : 0 : errno = 0;
2339 : 0 : ibv_list = mlx5_glue->get_device_list(&ret);
2340 [ # # ]: 0 : if (!ibv_list) {
2341 [ # # ]: 0 : rte_errno = errno ? errno : ENOSYS;
2342 : 0 : DRV_LOG(ERR, "Cannot list devices, is ib_uverbs loaded?");
2343 : 0 : return -rte_errno;
2344 : : }
2345 : : /*
2346 : : * First scan the list of all Infiniband devices to find
2347 : : * matching ones, gathering into the list.
2348 : : */
2349 : 0 : struct ibv_device *ibv_match[ret + 1];
2350 : 0 : struct mlx5_dev_info *info, tmp_info[ret];
2351 : 0 : int nl_route = mlx5_nl_init(NETLINK_ROUTE, 0);
2352 : 0 : int nl_rdma = mlx5_nl_init(NETLINK_RDMA, 0);
2353 : : unsigned int i;
2354 : :
2355 : : memset(tmp_info, 0, sizeof(tmp_info));
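 : : /* Walk the Verbs device list, matching bonding, MPESW and plain PCI devices against the owner PCI address. */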
2356 [ # # ]: 0 : while (ret-- > 0) {
2357 : : struct rte_pci_addr pci_addr;
2358 : :
2359 [ # # # # ]: 0 : if (cdev->config.probe_opt && cdev->dev_info.port_num) {
2360 [ # # ]: 0 : if (strcmp(ibv_list[ret]->name, cdev->dev_info.ibname)) {
2361 : 0 : DRV_LOG(INFO, "Unmatched caching device \"%s\" \"%s\"",
2362 : : cdev->dev_info.ibname, ibv_list[ret]->name);
2363 : 0 : continue;
2364 : : }
2365 : 0 : info = &cdev->dev_info;
2366 : : } else {
2367 : 0 : info = &tmp_info[ret];
2368 : : }
2369 : 0 : DRV_LOG(DEBUG, "Checking device \"%s\"", ibv_list[ret]->name);
2370 : 0 : bd = mlx5_device_bond_pci_match(ibv_list[ret]->name, &owner_pci,
2371 : : nl_rdma, owner_id,
2372 : : info,
2373 : : &bond_info);
2374 [ # # ]: 0 : if (bd >= 0) {
2375 : : /*
2376 : : * Bonding device detected. Only one match is allowed,
2377 : : * the bonding is supported over multi-port IB device,
2378 : : * there should be no matches on representor PCI
2379 : : * functions or non VF LAG bonding devices with
2380 : : * specified address.
2381 : : */
2382 [ # # ]: 0 : if (nd) {
2383 : 0 : DRV_LOG(ERR,
2384 : : "multiple PCI match on bonding device "
2385 : : "\"%s\" found", ibv_list[ret]->name);
2386 : 0 : rte_errno = ENOENT;
2387 : 0 : ret = -rte_errno;
2388 : 0 : goto exit;
2389 : : }
2390 : : /* Amend owner pci address if owner PF ID specified. */
2391 [ # # ]: 0 : if (eth_da.nb_representor_ports)
2392 : 0 : owner_pci.function += owner_id;
2393 : 0 : DRV_LOG(INFO,
2394 : : "PCI information matches for slave %d bonding device \"%s\"",
2395 : : bd, ibv_list[ret]->name);
2396 : 0 : ibv_match[nd++] = ibv_list[ret];
2397 : 0 : break;
2398 : : }
2399 : 0 : mpesw = mlx5_device_mpesw_pci_match(ibv_list[ret], &owner_pci, nl_rdma,
2400 : : info);
2401 [ # # ]: 0 : if (mpesw >= 0) {
2402 : : /*
2403 : : * MPESW device detected. Only one matching IB device is allowed,
2404 : : * so if any matches were found previously, fail gracefully.
2405 : : */
2406 [ # # ]: 0 : if (nd) {
2407 : 0 : DRV_LOG(ERR,
2408 : : "PCI information matches MPESW device \"%s\", "
2409 : : "but multiple matching PCI devices were found. "
2410 : : "Probing failed.",
2411 : : ibv_list[ret]->name);
2412 : 0 : rte_errno = ENOENT;
2413 : 0 : ret = -rte_errno;
2414 : 0 : goto exit;
2415 : : }
2416 : 0 : DRV_LOG(INFO,
2417 : : "PCI information matches MPESW device \"%s\"",
2418 : : ibv_list[ret]->name);
2419 : 0 : ibv_match[nd++] = ibv_list[ret];
2420 : 0 : break;
2421 : : }
2422 : : /* Bonding or MPESW device was not found. */
2423 [ # # ]: 0 : if (mlx5_get_pci_addr(ibv_list[ret]->ibdev_path,
2424 : : &pci_addr)) {
2425 [ # # ]: 0 : if (tmp_info[ret].port_info != NULL)
2426 : 0 : mlx5_free(tmp_info[ret].port_info);
2427 : 0 : memset(&tmp_info[ret], 0, sizeof(tmp_info[0]));
2428 : 0 : continue;
2429 : : }
2430 [ # # ]: 0 : if (rte_pci_addr_cmp(&owner_pci, &pci_addr) != 0) {
2431 [ # # ]: 0 : if (tmp_info[ret].port_info != NULL)
2432 : 0 : mlx5_free(tmp_info[ret].port_info);
2433 : 0 : memset(&tmp_info[ret], 0, sizeof(tmp_info[0]));
2434 : 0 : continue;
2435 : : }
2436 : 0 : DRV_LOG(INFO, "PCI information matches for device \"%s\"",
2437 : : ibv_list[ret]->name);
2438 : 0 : ibv_match[nd++] = ibv_list[ret];
2439 : : }
2440 : 0 : ibv_match[nd] = NULL;
2441 [ # # ]: 0 : if (!nd) {
2442 : : /* No device matches, just complain and bail out. */
2443 : 0 : DRV_LOG(WARNING,
2444 : : "PF %u doesn't have a Verbs device matching PCI device " PCI_PRI_FMT ","
2445 : : " are kernel drivers loaded?",
2446 : : owner_id, owner_pci.domain, owner_pci.bus,
2447 : : owner_pci.devid, owner_pci.function);
2448 : 0 : rte_errno = ENOENT;
2449 : 0 : ret = -rte_errno;
2450 : 0 : goto exit;
2451 : : }
2452 [ # # ]: 0 : if (nd == 1) {
2453 [ # # ]: 0 : if (!cdev->dev_info.port_num) {
2454 [ # # ]: 0 : for (i = 0; i < RTE_DIM(tmp_info); i++) {
2455 [ # # ]: 0 : if (tmp_info[i].port_num) {
2456 : 0 : cdev->dev_info = tmp_info[i];
2457 : 0 : break;
2458 : : }
2459 : : }
2460 : : }
2461 : : /*
2462 : : * The single matching device found may have multiple ports.
2463 : : * Each port may be a representor, so we have to check the port
2464 : : * number and the representors' existence.
2465 : : */
2466 [ # # ]: 0 : if (nl_rdma >= 0)
2467 : 0 : np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name, &cdev->dev_info);
2468 [ # # ]: 0 : if (!np)
2469 : 0 : DRV_LOG(WARNING,
2470 : : "Cannot get IB device \"%s\" ports number.",
2471 : : ibv_match[0]->name);
2472 [ # # ]: 0 : if (bd >= 0 && !np) {
2473 : 0 : DRV_LOG(ERR, "Cannot get ports for bonding device.");
2474 : 0 : rte_errno = ENOENT;
2475 : 0 : ret = -rte_errno;
2476 : 0 : goto exit;
2477 : : }
2478 [ # # ]: 0 : if (mpesw >= 0 && !np) {
2479 : 0 : DRV_LOG(ERR, "Cannot get ports for MPESW device.");
2480 : 0 : rte_errno = ENOENT;
2481 : 0 : ret = -rte_errno;
2482 : 0 : goto exit;
2483 : : }
2484 : : } else {
2485 : : /* Cannot cache info for multiple IB devices under a single common device. */
2486 [ # # ]: 0 : for (i = 0; i < RTE_DIM(tmp_info); i++) {
2487 [ # # ]: 0 : if (tmp_info[i].port_info != NULL)
2488 : 0 : mlx5_free(tmp_info[i].port_info);
2489 : 0 : memset(&tmp_info[i], 0, sizeof(tmp_info[0]));
2490 : : }
2491 : 0 : DRV_LOG(INFO, "Cannot handle multiple IB devices info caching in single common device.");
2492 : : }
2493 : : /* Now we can determine the maximal amount of devices to be spawned. */
2494 [ # # ]: 0 : list = mlx5_malloc(MLX5_MEM_ZERO,
2495 : 0 : sizeof(struct mlx5_dev_spawn_data) * (np ? np : nd),
2496 : : RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
2497 [ # # ]: 0 : if (!list) {
2498 : 0 : DRV_LOG(ERR, "Spawn data array allocation failure.");
2499 : 0 : rte_errno = ENOMEM;
2500 : 0 : ret = -rte_errno;
2501 : 0 : goto exit;
2502 : : }
2503 [ # # # # ]: 0 : if (bd >= 0 || mpesw >= 0 || np > 1) {
2504 : : /*
2505 : : * Single IB device with multiple ports found,
2506 : : * it may be E-Switch master device and representors.
2507 : : * We have to perform identification through the ports.
2508 : : */
2509 : : MLX5_ASSERT(nl_rdma >= 0);
2510 : : MLX5_ASSERT(ns == 0);
2511 : : MLX5_ASSERT(nd == 1);
2512 : : MLX5_ASSERT(np);
2513 [ # # ]: 0 : for (i = 1; i <= np; ++i) {
2514 : 0 : list[ns].bond_info = &bond_info;
2515 : 0 : list[ns].max_port = np;
2516 : 0 : list[ns].phys_port = i;
2517 : 0 : list[ns].phys_dev_name = ibv_match[0]->name;
2518 : 0 : list[ns].eth_dev = NULL;
2519 : 0 : list[ns].pci_dev = pci_dev;
2520 : 0 : list[ns].cdev = cdev;
2521 : 0 : list[ns].pf_bond = bd;
2522 : 0 : list[ns].mpesw_port = MLX5_MPESW_PORT_INVALID;
2523 : 0 : list[ns].ifindex = mlx5_nl_ifindex(nl_rdma,
2524 : : ibv_match[0]->name,
2525 : : i, &cdev->dev_info);
2526 [ # # ]: 0 : if (!list[ns].ifindex) {
2527 : : /*
2528 : : * No network interface index found for the
2529 : : * specified port, it means there is no
2530 : : * representor on this port. It's OK,
2531 : : * there can be disabled ports, for example
2532 : : * if sriov_numvfs < sriov_totalvfs.
2533 : : */
2534 : 0 : continue;
2535 : : }
2536 : 0 : ret = -1;
2537 [ # # ]: 0 : if (nl_route >= 0)
2538 : 0 : ret = mlx5_nl_switch_info(nl_route,
2539 : : list[ns].ifindex,
2540 : : &list[ns].info);
2541 [ # # # # ]: 0 : if (ret || (!list[ns].info.representor &&
2542 : : !list[ns].info.master)) {
2543 : : /*
2544 : : * We failed to recognize representors with
2545 : : * Netlink, let's try to perform the task
2546 : : * with sysfs.
2547 : : */
2548 : 0 : ret = mlx5_sysfs_switch_info(list[ns].ifindex,
2549 : : &list[ns].info);
2550 : : }
2551 [ # # # # ]: 0 : if (!ret && bd >= 0) {
2552 [ # # # ]: 0 : switch (list[ns].info.name_type) {
2553 : 0 : case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
2554 [ # # ]: 0 : if (np == 1) {
2555 : : /*
2556 : : * Force standalone bonding
2557 : : * device for ROCE LAG
2558 : : * configurations.
2559 : : */
2560 : 0 : list[ns].info.master = 0;
2561 : 0 : list[ns].info.representor = 0;
2562 : : }
2563 : 0 : ns++;
2564 : 0 : break;
2565 : 0 : case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
2566 : : /* Fallthrough */
2567 : : case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
2568 : : /* Fallthrough */
2569 : : case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
2570 [ # # ]: 0 : if (list[ns].info.pf_num == bd)
2571 : 0 : ns++;
2572 : : break;
2573 : : default:
2574 : : break;
2575 : : }
2576 : 0 : continue;
2577 : : }
2578 [ # # # # ]: 0 : if (!ret && mpesw >= 0) {
2579 [ # # # ]: 0 : switch (list[ns].info.name_type) {
2580 : 0 : case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
2581 : : /* Owner port is treated as master port. */
2582 [ # # ]: 0 : if (list[ns].info.port_name == mpesw) {
2583 : 0 : list[ns].info.master = 1;
2584 : 0 : list[ns].info.representor = 0;
2585 : : } else {
2586 : 0 : list[ns].info.master = 0;
2587 : 0 : list[ns].info.representor = 1;
2588 : : }
2589 : : /*
2590 : : * Ports of this type have uplink port index
2591 : : * encoded in the name. This index is also a PF index.
2592 : : */
2593 : 0 : list[ns].info.pf_num = list[ns].info.port_name;
2594 : 0 : list[ns].mpesw_port = list[ns].info.port_name;
2595 : 0 : list[ns].info.mpesw_owner = mpesw;
2596 : 0 : ns++;
2597 : 0 : break;
2598 : 0 : case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
2599 : : case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
2600 : : case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
2601 : : /* Only spawn representors related to the probed PF. */
2602 [ # # ]: 0 : if (list[ns].info.pf_num == owner_id) {
2603 : : /*
2604 : : * Ports of this type have PF index encoded in name,
2605 : : * which translate to the related uplink port index.
2606 : : */
2607 : 0 : list[ns].mpesw_port = list[ns].info.pf_num;
2608 : : /* MPESW owner is also saved but not used now. */
2609 : 0 : list[ns].info.mpesw_owner = mpesw;
2610 : 0 : ns++;
2611 : : }
2612 : : break;
2613 : : default:
2614 : : break;
2615 : : }
2616 : 0 : continue;
2617 : : }
2618 [ # # ]: 0 : if (!ret && (list[ns].info.representor ^
2619 [ # # ]: 0 : list[ns].info.master))
2620 : 0 : ns++;
2621 : : }
2622 : : } else {
2623 : : /*
2624 : : * The existence of several matching entries (nd > 1) means
2625 : : * port representors have been instantiated. No existing Verbs
2626 : : * call nor sysfs entries can tell them apart; this can only
2627 : : * be done through Netlink calls assuming kernel drivers are
2628 : : * recent enough to support them.
2629 : : *
2630 : : * In the event of identification failure through Netlink,
2631 : : * try again through sysfs, then:
2632 : : *
2633 : : * 1. A single IB device matches (nd == 1) with single
2634 : : * port (np=0/1) and is not a representor, assume
2635 : : * no switch support.
2636 : : *
2637 : : * 2. Otherwise no safe assumptions can be made;
2638 : : * complain louder and bail out.
2639 : : */
2640 [ # # ]: 0 : for (i = 0; i != nd; ++i) {
2641 [ # # ]: 0 : memset(&list[ns].info, 0, sizeof(list[ns].info));
2642 : 0 : list[ns].bond_info = NULL;
2643 : 0 : list[ns].max_port = 1;
2644 : 0 : list[ns].phys_port = 1;
2645 : 0 : list[ns].phys_dev_name = ibv_match[i]->name;
2646 : 0 : list[ns].eth_dev = NULL;
2647 : 0 : list[ns].pci_dev = pci_dev;
2648 : 0 : list[ns].cdev = cdev;
2649 : 0 : list[ns].pf_bond = -1;
2650 : 0 : list[ns].mpesw_port = MLX5_MPESW_PORT_INVALID;
2651 : 0 : list[ns].ifindex = 0;
2652 [ # # ]: 0 : if (nl_rdma >= 0)
2653 : 0 : list[ns].ifindex = mlx5_nl_ifindex
2654 : : (nl_rdma,
2655 : : ibv_match[i]->name,
2656 : : 1, &cdev->dev_info);
2657 [ # # ]: 0 : if (!list[ns].ifindex) {
2658 : : char ifname[IF_NAMESIZE];
2659 : :
2660 : : /*
2661 : : * Netlink failed, it may happen with old
2662 : : * ib_core kernel driver (before 4.16).
2663 : : * We can assume an old driver because
2664 : : * here we are processing single-port IB
2665 : : * devices. Let's try sysfs to retrieve
2666 : : * the ifindex. This method works for
2667 : : * the master device only.
2668 : : */
2669 [ # # ]: 0 : if (nd > 1) {
2670 : : /*
2671 : : * representors; we cannot distinguish
2672 : : * master/representor nor retrieve the
2673 : : * ifindex via sysfs.
2674 : : * ifindex via sysfs.
2675 : : */
2676 : 0 : continue;
2677 : : }
2678 : 0 : ret = mlx5_get_ifname_sysfs
2679 : 0 : (ibv_match[i]->ibdev_path, ifname);
2680 [ # # ]: 0 : if (!ret)
2681 : 0 : list[ns].ifindex =
2682 : 0 : if_nametoindex(ifname);
2683 [ # # ]: 0 : if (!list[ns].ifindex) {
2684 : : /*
2685 : : * No network interface index found
2686 : : * for the specified device, it means
2687 : : * it is neither a representor
2688 : : * nor a master.
2689 : : */
2690 : 0 : continue;
2691 : : }
2692 : : }
2693 : 0 : ret = -1;
2694 [ # # ]: 0 : if (nl_route >= 0)
2695 : 0 : ret = mlx5_nl_switch_info(nl_route,
2696 : : list[ns].ifindex,
2697 : : &list[ns].info);
2698 [ # # # # ]: 0 : if (ret || (!list[ns].info.representor &&
2699 : : !list[ns].info.master)) {
2700 : : /*
2701 : : * We failed to recognize representors with
2702 : : * Netlink, let's try to perform the task
2703 : : * with sysfs.
2704 : : */
2705 : 0 : ret = mlx5_sysfs_switch_info(list[ns].ifindex,
2706 : : &list[ns].info);
2707 : : }
2708 [ # # ]: 0 : if (!ret && (list[ns].info.representor ^
2709 [ # # ]: 0 : list[ns].info.master)) {
2710 : 0 : ns++;
2711 [ # # ]: 0 : } else if ((nd == 1) &&
2712 [ # # ]: 0 : !list[ns].info.representor &&
2713 : : !list[ns].info.master) {
2714 : : /*
2715 : : * Single IB device with one physical port and
2716 : : * attached network device.
2717 : : * May be SRIOV is not enabled or there is no
2718 : : * representors.
2719 : : */
2720 : 0 : DRV_LOG(INFO, "No E-Switch support detected.");
2721 : 0 : ns++;
2722 : 0 : break;
2723 : : }
2724 : : }
2725 [ # # ]: 0 : if (!ns) {
2726 : 0 : DRV_LOG(ERR,
2727 : : "Unable to recognize master/representors on the multiple IB devices.");
2728 : 0 : rte_errno = ENOENT;
2729 : 0 : ret = -rte_errno;
2730 : 0 : goto exit;
2731 : : }
2732 : : /*
2733 : : * New kernels may add the switch_id attribute even when
2734 : : * there is no E-Switch, making us wrongly recognize the only
2735 : : * device as a master. Override this if there is a single
2736 : : * device with a single port and the new device name format is present.
2737 : : */
2738 [ # # ]: 0 : if (nd == 1 &&
2739 [ # # ]: 0 : list[0].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
2740 : 0 : list[0].info.master = 0;
2741 : 0 : list[0].info.representor = 0;
2742 : : }
2743 : : }
2744 : : MLX5_ASSERT(ns);
2745 : : /*
2746 : : * Sort the list to probe devices in natural order for the user's convenience
2747 : : * (i.e. master first, then representors from lowest to highest ID).
2748 : : */
2749 : 0 : qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
2750 [ # # ]: 0 : if (eth_da.type != RTE_ETH_REPRESENTOR_NONE) {
2751 : : /* Set devargs default values. */
2752 [ # # ]: 0 : if (eth_da.nb_mh_controllers == 0) {
2753 : 0 : eth_da.nb_mh_controllers = 1;
2754 : 0 : eth_da.mh_controllers[0] = 0;
2755 : : }
2756 [ # # # # ]: 0 : if (eth_da.nb_ports == 0 && ns > 0) {
2757 [ # # # # ]: 0 : if (list[0].pf_bond >= 0 && list[0].info.representor)
2758 : 0 : DRV_LOG(WARNING, "Representor on Bonding device should use pf#vf# syntax: %s",
2759 : : pci_dev->device.devargs->args);
2760 : 0 : eth_da.nb_ports = 1;
2761 : 0 : eth_da.ports[0] = list[0].info.pf_num;
2762 : : }
2763 [ # # ]: 0 : if (eth_da.nb_representor_ports == 0) {
2764 : 0 : eth_da.nb_representor_ports = 1;
2765 : 0 : eth_da.representor_ports[0] = 0;
2766 : : }
2767 : : }
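 : : /* Spawn an ethdev for each prepared entry; stop on the first hard failure. */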
2768 [ # # ]: 0 : for (i = 0; i != ns; ++i) {
2769 : : uint32_t restore;
2770 : :
2771 : 0 : list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i], ð_da,
2772 : : mkvlist);
2773 [ # # ]: 0 : if (!list[i].eth_dev) {
2774 [ # # ]: 0 : if (rte_errno != EBUSY && rte_errno != EEXIST)
2775 : : break;
2776 : : /* Device is disabled or already spawned. Ignore it. */
2777 : 0 : continue;
2778 : : }
2779 : 0 : restore = list[i].eth_dev->data->dev_flags;
2780 : 0 : rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
2781 : : /**
2782 : : * Each representor has a dedicated interrupt vector.
2783 : : * rte_eth_copy_pci_info() assigns the PF interrupt handle to
2784 : : * the representor eth_dev object because the representor and PF
2785 : : * share the same PCI address.
2786 : : * Override the representor device with a dedicated
2787 : : * interrupt handle here.
2788 : : * The representor interrupt handle is released in mlx5_dev_stop().
2789 : : */
2790 [ # # ]: 0 : if (list[i].info.representor) {
2791 : : struct rte_intr_handle *intr_handle =
2792 : 0 : rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
2793 [ # # ]: 0 : if (intr_handle == NULL) {
2794 : 0 : DRV_LOG(ERR,
2795 : : "port %u failed to allocate memory for interrupt handler, "
2796 : : "Rx interrupts will not be supported",
2797 : : i);
2798 : 0 : rte_errno = ENOMEM;
2799 : 0 : ret = -rte_errno;
2800 : 0 : goto exit;
2801 : : }
2802 : 0 : list[i].eth_dev->intr_handle = intr_handle;
2803 : : }
2804 : : /* Restore non-PCI flags cleared by the above call. */
2805 : 0 : list[i].eth_dev->data->dev_flags |= restore;
2806 : 0 : rte_eth_dev_probing_finish(list[i].eth_dev);
2807 : : }
2808 [ # # ]: 0 : if (i != ns) {
2809 : 0 : DRV_LOG(ERR,
2810 : : "probe of PCI device " PCI_PRI_FMT " aborted after"
2811 : : " encountering an error: %s",
2812 : : owner_pci.domain, owner_pci.bus,
2813 : : owner_pci.devid, owner_pci.function,
2814 : : strerror(rte_errno));
2815 : 0 : ret = -rte_errno;
2816 : : /* Roll back. */
2817 [ # # ]: 0 : while (i--) {
2818 [ # # ]: 0 : if (!list[i].eth_dev)
2819 : 0 : continue;
2820 : 0 : mlx5_dev_close(list[i].eth_dev);
2821 : : /* mac_addrs must not be freed because in dev_private */
2822 : 0 : list[i].eth_dev->data->mac_addrs = NULL;
2823 : 0 : claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
2824 : : }
2825 : : /* Restore original error. */
2826 : 0 : rte_errno = -ret;
2827 : : } else {
2828 : 0 : ret = 0;
2829 : : }
2830 : 0 : exit:
2831 : : /*
2832 : : * Do the routine cleanup:
2833 : : * - close opened Netlink sockets
2834 : : * - free allocated spawn data array
2835 : : * - free the Infiniband device list
2836 : : */
2837 [ # # ]: 0 : if (nl_rdma >= 0)
2838 : 0 : close(nl_rdma);
2839 [ # # ]: 0 : if (nl_route >= 0)
2840 : 0 : close(nl_route);
2841 [ # # ]: 0 : if (list)
2842 : 0 : mlx5_free(list);
2843 : : MLX5_ASSERT(ibv_list);
2844 : 0 : mlx5_glue->free_device_list(ibv_list);
2845 [ # # ]: 0 : if (ret) {
2846 [ # # ]: 0 : if (cdev->dev_info.port_info != NULL)
2847 : 0 : mlx5_free(cdev->dev_info.port_info);
2848 : 0 : memset(&cdev->dev_info, 0, sizeof(cdev->dev_info));
2849 : : }
2850 : 0 : return ret;
2851 : : }
2852 : :
2853 : : static int
2854 : 0 : mlx5_os_parse_eth_devargs(struct rte_device *dev,
2855 : : struct rte_eth_devargs *eth_da)
2856 : : {
2857 : : int ret = 0;
2858 : :
2859 [ # # ]: 0 : if (dev->devargs == NULL)
2860 : : return 0;
2861 : : memset(eth_da, 0, sizeof(*eth_da));
2862 : : /* Parse representor information first from class argument. */
2863 [ # # ]: 0 : if (dev->devargs->cls_str)
2864 : 0 : ret = rte_eth_devargs_parse(dev->devargs->cls_str, eth_da, 1);
2865 [ # # ]: 0 : if (ret < 0) {
2866 : 0 : DRV_LOG(ERR, "failed to parse device arguments: %s",
2867 : : dev->devargs->cls_str);
2868 : 0 : return -rte_errno;
2869 : : }
2870 [ # # # # ]: 0 : if (eth_da->type == RTE_ETH_REPRESENTOR_NONE && dev->devargs->args) {
2871 : : /* Parse legacy device argument */
2872 : 0 : ret = rte_eth_devargs_parse(dev->devargs->args, eth_da, 1);
2873 [ # # ]: 0 : if (ret < 0) {
2874 : 0 : DRV_LOG(ERR, "failed to parse device arguments: %s",
2875 : : dev->devargs->args);
2876 : 0 : return -rte_errno;
2877 : : }
2878 : : }
2879 : : return 0;
2880 : : }
2881 : :
2882 : : /**
2883 : : * Callback to register a PCI device.
2884 : : *
2885 : : * This function spawns Ethernet devices out of a given PCI device.
2886 : : *
2887 : : * @param[in] cdev
2888 : : * Pointer to common mlx5 device structure.
2889 : : * @param[in, out] mkvlist
2890 : : * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
2891 : : *
2892 : : * @return
2893 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
2894 : : */
2895 : : static int
2896 : 0 : mlx5_os_pci_probe(struct mlx5_common_device *cdev,
2897 : : struct mlx5_kvargs_ctrl *mkvlist)
2898 : : {
2899 : 0 : struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
2900 : 0 : struct rte_eth_devargs eth_da = { .nb_ports = 0 };
2901 : : int ret = 0;
2902 : : uint16_t p;
2903 : :
2904 : 0 : ret = mlx5_os_parse_eth_devargs(cdev->dev, ð_da);
2905 [ # # ]: 0 : if (ret != 0)
2906 : : return ret;
2907 : :
2908 [ # # ]: 0 : if (eth_da.nb_ports > 0) {
2909 : : /* Iterate all port if devargs pf is range: "pf[0-1]vf[...]". */
2910 [ # # ]: 0 : for (p = 0; p < eth_da.nb_ports; p++) {
2911 : 0 : ret = mlx5_os_pci_probe_pf(cdev, ð_da,
2912 : 0 : eth_da.ports[p], mkvlist);
2913 [ # # ]: 0 : if (ret) {
2914 : 0 : DRV_LOG(INFO, "Probe of PCI device " PCI_PRI_FMT " "
2915 : : "aborted due to probing failure of PF %u",
2916 : : pci_dev->addr.domain, pci_dev->addr.bus,
2917 : : pci_dev->addr.devid, pci_dev->addr.function,
2918 : : eth_da.ports[p]);
2919 : 0 : mlx5_net_remove(cdev);
2920 [ # # ]: 0 : if (p != 0)
2921 : : break;
2922 : : }
2923 : : }
2924 : : } else {
2925 : 0 : ret = mlx5_os_pci_probe_pf(cdev, ð_da, 0, mkvlist);
2926 : : }
2927 : : return ret;
2928 : : }
2929 : :
2930 : : /* Probe a single SF device on auxiliary bus, no representor support. */
2931 : : static int
2932 : 0 : mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev,
2933 : : struct mlx5_kvargs_ctrl *mkvlist)
2934 : : {
2935 : 0 : struct rte_eth_devargs eth_da = { .nb_ports = 0 };
2936 : 0 : struct mlx5_dev_spawn_data spawn = {
2937 : : .pf_bond = -1,
2938 : : .mpesw_port = MLX5_MPESW_PORT_INVALID,
2939 : : };
2940 : 0 : struct rte_device *dev = cdev->dev;
2941 : 0 : struct rte_auxiliary_device *adev = RTE_DEV_TO_AUXILIARY(dev);
2942 : : struct rte_eth_dev *eth_dev;
2943 : : int ret = 0;
2944 : :
2945 : : /* Parse ethdev devargs. */
2946 : 0 : ret = mlx5_os_parse_eth_devargs(dev, ð_da);
2947 [ # # ]: 0 : if (ret != 0)
2948 : : return ret;
2949 : : /* Init spawn data. */
2950 : 0 : spawn.max_port = 1;
2951 : 0 : spawn.phys_port = 1;
2952 [ # # ]: 0 : spawn.phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
2953 : 0 : ret = mlx5_auxiliary_get_ifindex(dev->name);
2954 [ # # ]: 0 : if (ret < 0) {
2955 : 0 : DRV_LOG(ERR, "failed to get ethdev ifindex: %s", dev->name);
2956 : 0 : return ret;
2957 : : }
2958 : 0 : spawn.ifindex = ret;
2959 : 0 : spawn.cdev = cdev;
2960 : : /* Spawn device. */
2961 : 0 : eth_dev = mlx5_dev_spawn(dev, &spawn, ð_da, mkvlist);
2962 [ # # ]: 0 : if (eth_dev == NULL)
2963 : 0 : return -rte_errno;
2964 : : /* Post create. */
2965 : 0 : eth_dev->intr_handle = adev->intr_handle;
2966 [ # # ]: 0 : if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2967 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
2968 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_RMV;
2969 : 0 : eth_dev->data->numa_node = dev->numa_node;
2970 : : }
2971 : 0 : rte_eth_dev_probing_finish(eth_dev);
2972 : 0 : return 0;
2973 : : }
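
/*
 * Editor's note: minimal sketch of the spawn data built above for an SF
 * probed on the auxiliary bus, e.g. an EAL device assumed to be named
 * "auxiliary:mlx5_core.sf.4". A single port is spawned and representors are
 * not supported; the device name and guard macro are illustrative only.
 */
#ifdef MLX5_OS_DOC_EXAMPLE /* illustrative only, never compiled */
static void
example_sf_spawn_data(struct mlx5_common_device *cdev)
{
	struct mlx5_dev_spawn_data spawn = {
		.pf_bond = -1,
		.mpesw_port = MLX5_MPESW_PORT_INVALID,
		.max_port = 1,	/* an SF exposes exactly one port */
		.phys_port = 1,
		.cdev = cdev,
	};
	int ifindex = mlx5_auxiliary_get_ifindex("mlx5_core.sf.4");

	if (ifindex >= 0)
		spawn.ifindex = ifindex;
	(void)spawn;
}
#endif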
2974 : :
2975 : : /**
2976 : : * Net class driver callback to probe a device.
2977 : : *
2978 : :  * This function probes PCI bus device(s) or a single SF on the auxiliary bus.
2979 : : *
2980 : : * @param[in] cdev
2981 : : * Pointer to the common mlx5 device.
2982 : : * @param[in, out] mkvlist
2983 : : * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
2984 : : *
2985 : : * @return
2986 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
2987 : : */
2988 : : int
2989 : 0 : mlx5_os_net_probe(struct mlx5_common_device *cdev,
2990 : : struct mlx5_kvargs_ctrl *mkvlist)
2991 : : {
2992 : : int ret;
2993 : :
2994 [ # # ]: 0 : if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2995 : 0 : mlx5_pmd_socket_init();
2996 : 0 : ret = mlx5_init_once();
2997 [ # # ]: 0 : if (ret) {
2998 : 0 : DRV_LOG(ERR, "Unable to init PMD global data: %s",
2999 : : strerror(rte_errno));
3000 : 0 : return -rte_errno;
3001 : : }
3002 : 0 : ret = mlx5_probe_again_args_validate(cdev, mkvlist);
3003 [ # # ]: 0 : if (ret) {
3004 : 0 : DRV_LOG(ERR, "Probe again parameters are not compatible : %s",
3005 : : strerror(rte_errno));
3006 : 0 : return -rte_errno;
3007 : : }
3008 [ # # ]: 0 : if (mlx5_dev_is_pci(cdev->dev))
3009 : 0 : return mlx5_os_pci_probe(cdev, mkvlist);
3010 : : else
3011 : 0 : return mlx5_os_auxiliary_probe(cdev, mkvlist);
3012 : : }
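
/*
 * Editor's note: hypothetical EAL allow-list arguments that would reach this
 * probe entry point (assumed examples, not taken from this file):
 *   -a 0000:08:00.0,representor=vf[0-3]   -> PCI path, mlx5_os_pci_probe()
 *   -a auxiliary:mlx5_core.sf.4           -> SF path, mlx5_os_auxiliary_probe()
 */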
3013 : :
3014 : : /**
3015 : : * Cleanup resources when the last device is closed.
3016 : : */
3017 : : void
3018 : 0 : mlx5_os_net_cleanup(void)
3019 : : {
3020 : 0 : mlx5_pmd_socket_uninit();
3021 : 0 : }
3022 : :
3023 : : /**
3024 : : * Install shared asynchronous device events handler.
3025 : : * This function is implemented to support event sharing
3026 : :  * between multiple ports of a single IB device.
3027 : : *
3028 : : * @param sh
3029 : : * Pointer to mlx5_dev_ctx_shared object.
3030 : : */
3031 : : void
3032 : 0 : mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
3033 : : {
3034 : 0 : struct ibv_context *ctx = sh->cdev->ctx;
3035 : : int nlsk_fd;
3036 : 0 : uint8_t rdma_monitor_supp = 0;
3037 : :
3038 : 0 : sh->intr_handle = mlx5_os_interrupt_handler_create
3039 : : (RTE_INTR_INSTANCE_F_SHARED, true,
3040 : : ctx->async_fd, mlx5_dev_interrupt_handler, sh);
3041 [ # # ]: 0 : if (!sh->intr_handle) {
3042 : 0 : DRV_LOG(ERR, "Failed to allocate intr_handle.");
3043 : 0 : return;
3044 : : }
3045 [ # # ]: 0 : if (sh->cdev->config.probe_opt &&
3046 [ # # ]: 0 : sh->cdev->dev_info.port_num > 1 &&
3047 [ # # ]: 0 : !sh->rdma_monitor_supp) {
3048 : 0 : nlsk_fd = mlx5_nl_rdma_monitor_init();
3049 [ # # ]: 0 : if (nlsk_fd < 0) {
3050 : 0 : DRV_LOG(ERR, "Failed to create a socket for RDMA Netlink events: %s",
3051 : : rte_strerror(rte_errno));
3052 : 0 : return;
3053 : : }
3054 [ # # ]: 0 : if (mlx5_nl_rdma_monitor_cap_get(nlsk_fd, &rdma_monitor_supp)) {
3055 : 0 : DRV_LOG(ERR, "Failed to query RDMA monitor support: %s",
3056 : : rte_strerror(rte_errno));
3057 : 0 : close(nlsk_fd);
3058 : 0 : return;
3059 : : }
3060 : 0 : sh->rdma_monitor_supp = rdma_monitor_supp;
3061 [ # # ]: 0 : if (sh->rdma_monitor_supp) {
3062 : 0 : sh->intr_handle_ib = mlx5_os_interrupt_handler_create
3063 : : (RTE_INTR_INSTANCE_F_SHARED, true,
3064 : : nlsk_fd, mlx5_dev_interrupt_handler_ib, sh);
3065 [ # # ]: 0 : if (sh->intr_handle_ib == NULL) {
3066 : 0 :                                 DRV_LOG(ERR, "Failed to allocate intr_handle.");
3067 : 0 : close(nlsk_fd);
3068 : 0 : return;
3069 : : }
3070 : : } else {
3071 : 0 : close(nlsk_fd);
3072 : : }
3073 : : }
3074 : 0 : nlsk_fd = mlx5_nl_init(NETLINK_ROUTE, RTMGRP_LINK);
3075 [ # # ]: 0 : if (nlsk_fd < 0) {
3076 : 0 : DRV_LOG(ERR, "Failed to create a socket for Netlink events: %s",
3077 : : rte_strerror(rte_errno));
3078 : 0 : return;
3079 : : }
3080 : 0 : sh->intr_handle_nl = mlx5_os_interrupt_handler_create
3081 : : (RTE_INTR_INSTANCE_F_SHARED, true,
3082 : : nlsk_fd, mlx5_dev_interrupt_handler_nl, sh);
3083 [ # # ]: 0 : if (sh->intr_handle_nl == NULL) {
3084 : 0 :                 DRV_LOG(ERR, "Failed to allocate intr_handle.");
3085 : 0 : return;
3086 : : }
3087 [ # # ]: 0 : if (sh->cdev->config.devx) {
3088 : : #ifdef HAVE_IBV_DEVX_ASYNC
3089 : : struct mlx5dv_devx_cmd_comp *devx_comp;
3090 : :
3091 : 0 : sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
3092 : : devx_comp = sh->devx_comp;
3093 [ # # ]: 0 : if (!devx_comp) {
3094 : 0 : DRV_LOG(INFO, "failed to allocate devx_comp.");
3095 : 0 : return;
3096 : : }
3097 : 0 : sh->intr_handle_devx = mlx5_os_interrupt_handler_create
3098 : : (RTE_INTR_INSTANCE_F_SHARED, true,
3099 : : devx_comp->fd,
3100 : : mlx5_dev_interrupt_handler_devx, sh);
3101 [ # # ]: 0 : if (!sh->intr_handle_devx) {
3102 : 0 : DRV_LOG(ERR, "Failed to allocate intr_handle.");
3103 : 0 : return;
3104 : : }
3105 : : #endif /* HAVE_IBV_DEVX_ASYNC */
3106 : : }
3107 : : }
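
/*
 * Editor's note: mlx5_os_interrupt_handler_create() is an mlx5 common helper;
 * a rough equivalent in terms of the generic DPDK interrupt API is sketched
 * below, under the stated assumption that the helper essentially wraps these
 * calls. Error handling is trimmed; this is not the driver's implementation.
 */
#ifdef MLX5_OS_DOC_EXAMPLE /* illustrative only, never compiled */
static struct rte_intr_handle *
example_intr_handler_create(int fd, rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_handle *h;

	h = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
	if (h == NULL)
		return NULL;
	if (rte_intr_fd_set(h, fd) ||
	    rte_intr_type_set(h, RTE_INTR_HANDLE_EXT) ||
	    rte_intr_callback_register(h, cb, cb_arg)) {
		rte_intr_instance_free(h);
		return NULL;
	}
	return h;
}
#endif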
3108 : :
3109 : : /**
3110 : : * Uninstall shared asynchronous device events handler.
3111 : : * This function is implemented to support event sharing
3112 : :  * between multiple ports of a single IB device.
3113 : : *
3114 : :  * @param sh
3115 : : * Pointer to mlx5_dev_ctx_shared object.
3116 : : */
3117 : : void
3118 : 0 : mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
3119 : : {
3120 : : int fd;
3121 : :
3122 : 0 : mlx5_os_interrupt_handler_destroy(sh->intr_handle,
3123 : : mlx5_dev_interrupt_handler, sh);
3124 : 0 : fd = rte_intr_fd_get(sh->intr_handle_nl);
3125 : 0 : mlx5_os_interrupt_handler_destroy(sh->intr_handle_nl,
3126 : : mlx5_dev_interrupt_handler_nl, sh);
3127 [ # # ]: 0 : if (fd >= 0)
3128 : 0 : close(fd);
3129 : : #ifdef HAVE_IBV_DEVX_ASYNC
3130 : 0 : mlx5_os_interrupt_handler_destroy(sh->intr_handle_devx,
3131 : : mlx5_dev_interrupt_handler_devx, sh);
3132 [ # # ]: 0 : if (sh->devx_comp)
3133 : 0 : mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
3134 : : #endif
3135 : 0 : fd = rte_intr_fd_get(sh->intr_handle_ib);
3136 : 0 : mlx5_os_interrupt_handler_destroy(sh->intr_handle_ib,
3137 : : mlx5_dev_interrupt_handler_ib, sh);
3138 [ # # ]: 0 : if (fd >= 0)
3139 : 0 : close(fd);
3140 : 0 : }
3141 : :
3142 : : /**
3143 : : * Read statistics by a named counter.
3144 : : *
3145 : : * @param[in] priv
3146 : : * Pointer to the private device data structure.
3147 : : * @param[in] ctr_name
3148 : :  *   Pointer to the name of the statistic counter to read.
3149 : :  * @param[out] stat
3150 : :  *   Pointer to the location where the read statistic value is stored.
3151 : :  * @return
3152 : :  *   0 on success and *stat is valid, 1 if the value could not be read and
3153 : :  *   rte_errno is set.
3154 : : *
3155 : : */
3156 : : int
3157 : 0 : mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
3158 : : uint64_t *stat)
3159 : : {
3160 : : int fd;
3161 : :
3162 [ # # ]: 0 : if (priv->sh) {
3163 [ # # ]: 0 : if (priv->q_counters != NULL &&
3164 [ # # ]: 0 : strcmp(ctr_name, "out_of_buffer") == 0) {
3165 : 0 : return mlx5_read_queue_counter(priv->q_counters, ctr_name, stat);
3166 : : }
3167 [ # # ]: 0 : if (priv->q_counter_hairpin != NULL &&
3168 [ # # ]: 0 : strcmp(ctr_name, "hairpin_out_of_buffer") == 0) {
3169 : 0 : return mlx5_read_queue_counter(priv->q_counter_hairpin, ctr_name, stat);
3170 : : }
3171 : 0 : MKSTR(path, "%s/ports/%d/hw_counters/%s",
3172 : : priv->sh->ibdev_path,
3173 : : priv->dev_port,
3174 : : ctr_name);
3175 : : fd = open(path, O_RDONLY);
3176 : : /*
3177 : :                  * In switchdev mode the file location is not per port,
3178 : : * but rather in <ibdev_path>/hw_counters/<file_name>.
3179 : : */
3180 [ # # ]: 0 : if (fd == -1) {
3181 : 0 : MKSTR(path1, "%s/hw_counters/%s",
3182 : : priv->sh->ibdev_path,
3183 : : ctr_name);
3184 : : fd = open(path1, O_RDONLY);
3185 : : }
3186 [ # # ]: 0 : if (fd != -1) {
3187 : 0 : char buf[21] = {'\0'};
3188 : : ssize_t n = read(fd, buf, sizeof(buf));
3189 : :
3190 : 0 : close(fd);
3191 [ # # ]: 0 : if (n != -1) {
3192 : 0 : *stat = strtoull(buf, NULL, 10);
3193 : 0 : return 0;
3194 : : }
3195 : : }
3196 : : }
3197 : 0 : *stat = 0;
3198 : 0 : return 1;
3199 : : }
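
/*
 * Editor's note: the two sysfs locations probed above typically look like
 * the following (the device name, port and counter are assumed examples):
 *   <ibdev_path>/ports/1/hw_counters/out_of_buffer
 *     e.g. /sys/class/infiniband/mlx5_0/ports/1/hw_counters/out_of_buffer
 *   <ibdev_path>/hw_counters/out_of_buffer            (switchdev mode)
 * The counter file holds one decimal number, parsed with strtoull() above.
 */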
3200 : :
3201 : : /**
3202 : :  * Remove a MAC address from the device
3203 : : *
3204 : : * @param dev
3205 : : * Pointer to Ethernet device structure.
3206 : : * @param index
3207 : : * MAC address index.
3208 : : */
3209 : : void
3210 : 0 : mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
3211 : : {
3212 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3213 : 0 : const int vf = priv->sh->dev_cap.vf;
3214 : :
3215 [ # # ]: 0 : if (vf)
3216 : 0 : mlx5_nl_mac_addr_remove(priv->nl_socket_route,
3217 : 0 : mlx5_ifindex(dev), priv->mac_own,
3218 : 0 : &dev->data->mac_addrs[index], index);
3219 : 0 : }
3220 : :
3221 : : /**
3222 : :  * Add a MAC address to the device
3223 : : *
3224 : : * @param dev
3225 : : * Pointer to Ethernet device structure.
3226 : : * @param mac_addr
3227 : : * MAC address to register.
3228 : : * @param index
3229 : : * MAC address index.
3230 : : *
3231 : : * @return
3232 : : * 0 on success, a negative errno value otherwise
3233 : : */
3234 : : int
3235 : 0 : mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
3236 : : uint32_t index)
3237 : : {
3238 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3239 : 0 : const int vf = priv->sh->dev_cap.vf;
3240 : : int ret = 0;
3241 : :
3242 [ # # ]: 0 : if (vf)
3243 : 0 : ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
3244 : 0 : mlx5_ifindex(dev), priv->mac_own,
3245 : : mac, index);
3246 : 0 : return ret;
3247 : : }
3248 : :
3249 : : /**
3250 : : * Modify a VF MAC address
3251 : : *
3252 : : * @param priv
3253 : : * Pointer to device private data.
3254 : : * @param mac_addr
3255 : : * MAC address to modify into.
3256 : : * @param iface_idx
3257 : : * Net device interface index
3258 : : * @param vf_index
3259 : : * VF index
3260 : : *
3261 : : * @return
3262 : : * 0 on success, a negative errno value otherwise
3263 : : */
3264 : : int
3265 : 0 : mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
3266 : : unsigned int iface_idx,
3267 : : struct rte_ether_addr *mac_addr,
3268 : : int vf_index)
3269 : : {
3270 : 0 : return mlx5_nl_vf_mac_addr_modify
3271 : : (priv->nl_socket_route, iface_idx, mac_addr, vf_index);
3272 : : }
3273 : :
3274 : : /**
3275 : : * Set device promiscuous mode
3276 : : *
3277 : : * @param dev
3278 : : * Pointer to Ethernet device structure.
3279 : : * @param enable
3280 : : * 0 - promiscuous is disabled, otherwise - enabled
3281 : : *
3282 : : * @return
3283 : : * 0 on success, a negative error value otherwise
3284 : : */
3285 : : int
3286 : 0 : mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
3287 : : {
3288 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3289 : :
3290 : 0 : return mlx5_nl_promisc(priv->nl_socket_route,
3291 : : mlx5_ifindex(dev), !!enable);
3292 : : }
3293 : :
3294 : : /**
3295 : :  * Set device allmulticast mode
3296 : : *
3297 : : * @param dev
3298 : : * Pointer to Ethernet device structure.
3299 : : * @param enable
3300 : :  *   0 - all multicast is disabled, otherwise - enabled
3301 : : *
3302 : : * @return
3303 : : * 0 on success, a negative error value otherwise
3304 : : */
3305 : : int
3306 : 0 : mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
3307 : : {
3308 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3309 : :
3310 : 0 : return mlx5_nl_allmulti(priv->nl_socket_route,
3311 : : mlx5_ifindex(dev), !!enable);
3312 : : }
3313 : :
3314 : : /**
3315 : : * Flush device MAC addresses
3316 : : *
3317 : : * @param dev
3318 : : * Pointer to Ethernet device structure.
3319 : : *
3320 : : */
3321 : : void
3322 : 0 : mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
3323 : : {
3324 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3325 : :
3326 : 0 : mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
3327 : : dev->data->mac_addrs,
3328 : 0 : MLX5_MAX_MAC_ADDRESSES, priv->mac_own);
3329 : 0 : }
|