Branch data Line data Source code
1 : : /* SPDX-License-Identifier: BSD-3-Clause
2 : : * Copyright 2015 6WIND S.A.
3 : : * Copyright 2020 Mellanox Technologies, Ltd
4 : : */
5 : :
6 : : #include <stddef.h>
7 : : #include <unistd.h>
8 : : #include <string.h>
9 : : #include <stdint.h>
10 : : #include <stdlib.h>
11 : : #include <errno.h>
12 : : #include <net/if.h>
13 : : #include <linux/rtnetlink.h>
14 : : #include <linux/sockios.h>
15 : : #include <linux/ethtool.h>
16 : : #include <fcntl.h>
17 : :
18 : : #include <rte_malloc.h>
19 : : #include <ethdev_driver.h>
20 : : #include <ethdev_pci.h>
21 : : #include <rte_pci.h>
22 : : #include <bus_driver.h>
23 : : #include <bus_pci_driver.h>
24 : : #include <bus_auxiliary_driver.h>
25 : : #include <rte_common.h>
26 : : #include <rte_kvargs.h>
27 : : #include <rte_rwlock.h>
28 : : #include <rte_spinlock.h>
29 : : #include <rte_string_fns.h>
30 : : #include <rte_alarm.h>
31 : : #include <rte_eal_paging.h>
32 : :
33 : : #include <mlx5_glue.h>
34 : : #include <mlx5_devx_cmds.h>
35 : : #include <mlx5_common.h>
36 : : #include <mlx5_common_mp.h>
37 : : #include <mlx5_common_mr.h>
38 : : #include <mlx5_malloc.h>
39 : :
40 : : #include "mlx5_defs.h"
41 : : #include "mlx5.h"
42 : : #include "mlx5_common_os.h"
43 : : #include "mlx5_utils.h"
44 : : #include "mlx5_rxtx.h"
45 : : #include "mlx5_rx.h"
46 : : #include "mlx5_tx.h"
47 : : #include "mlx5_autoconf.h"
48 : : #include "mlx5_flow.h"
49 : : #include "rte_pmd_mlx5.h"
50 : : #include "mlx5_verbs.h"
51 : : #include "mlx5_nl.h"
52 : : #include "mlx5_devx.h"
53 : :
#ifndef HAVE_IBV_MLX5_MOD_MPW
/* Fallback bit definitions when rdma-core predates the MPW context flags. */
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif

#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
/* Fallback when rdma-core predates the 128B CQE compression flag. */
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif

/* Name of the memzone shared between primary and secondary processes. */
static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";

/* Spinlock for mlx5_shared_data allocation. */
static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;

/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;
70 : :
/* rte flow indexed pool configuration. */
static const struct mlx5_indexed_pool_config default_icfg[] = {
	{
		/* Pool for control flows: small, locked, no per-core cache. */
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 0,
		.type = "ctl_flow_ipool",
	},
	{
		/*
		 * Pool for user rte_flow rules: growing trunks and a 16K
		 * per-core cache for fast insertion on the hot path.
		 */
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 1 << 14,
		.type = "rte_flow_ipool",
	},
	{
		/* Pool for metadata-copy (MCP) flows: growing, uncached. */
		.size = sizeof(struct rte_flow),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 0,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.per_core_cache = 0,
		.type = "mcp_flow_ipool",
	},
};
108 : :
/**
 * Set the completion channel file descriptor interrupt as non-blocking.
 *
 * @param[in] fd
 *   The file descriptor (representing the interrupt) used in this channel.
 *
 * @return
 *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
 */
int
mlx5_os_set_nonblock_channel_fd(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFL);
	/*
	 * Do not OR O_NONBLOCK into -1 on F_GETFL failure: that would both
	 * mask the error and hand a garbage flag set to F_SETFL.
	 */
	if (flags == -1)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
129 : :
/**
 * Get mlx5 device attributes. The glue function query_device_ex() is called
 * with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
 * device attributes from the glue out parameter.
 *
 * Fills sh->dev_cap from two sources: the verbs extended device attributes
 * and the mlx5dv context (both queried through the glue layer). DevX-only
 * capabilities (packet pacing, delay drop, timestamping) are probed at the
 * end and skipped entirely when DevX is unavailable.
 *
 * @param sh
 *   Pointer to shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
{
	int err;
	struct mlx5_common_device *cdev = sh->cdev;
	struct mlx5_hca_attr *hca_attr = &cdev->config.hca_attr;
	struct ibv_device_attr_ex attr_ex = { .comp_mask = 0 };
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };

	err = mlx5_glue->query_device_ex(cdev->ctx, NULL, &attr_ex);
	if (err) {
		rte_errno = errno;
		return -rte_errno;
	}
	/* Request only the DV sections this rdma-core build can report. */
#ifdef HAVE_IBV_MLX5_MOD_SWP
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
#ifdef HAVE_IBV_DEVICE_ATTR_ESW_MGR_REG_C0
	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_REG_C0;
#endif
	err = mlx5_glue->dv_query_device(cdev->ctx, &dv_attr);
	if (err) {
		rte_errno = errno;
		return -rte_errno;
	}
	/* Start from a clean slate; every flag below is opt-in. */
	memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
	if (mlx5_dev_is_pci(cdev->dev))
		sh->dev_cap.vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(cdev->dev));
	else
		sh->dev_cap.sf = 1;
	sh->dev_cap.max_qp_wr = attr_ex.orig_attr.max_qp_wr;
	sh->dev_cap.max_sge = attr_ex.orig_attr.max_sge;
	sh->dev_cap.max_cq = attr_ex.orig_attr.max_cq;
	sh->dev_cap.max_qp = attr_ex.orig_attr.max_qp;
#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
	sh->dev_cap.dest_tir = 1;
#endif
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) && defined(HAVE_MLX5DV_DR)
	DRV_LOG(DEBUG, "DV flow is supported.");
	sh->dev_cap.dv_flow_en = 1;
#endif
#ifdef HAVE_MLX5DV_DR_ESWITCH
	/* E-Switch offload needs DV flow plus eswitch-manager privilege. */
	if (hca_attr->eswitch_manager && sh->dev_cap.dv_flow_en && sh->esw_mode)
		sh->dev_cap.dv_esw_en = 1;
#endif
	/*
	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
	 * as all ConnectX-5 devices.
	 */
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
			DRV_LOG(DEBUG, "Enhanced MPW is supported.");
			sh->dev_cap.mps = MLX5_MPW_ENHANCED;
		} else {
			DRV_LOG(DEBUG, "MPW is supported.");
			sh->dev_cap.mps = MLX5_MPW;
		}
	} else {
		DRV_LOG(DEBUG, "MPW isn't supported.");
		sh->dev_cap.mps = MLX5_MPW_DISABLED;
	}
#if (RTE_CACHE_LINE_SIZE == 128)
	/* On 128B cache-line hosts CQE compression needs explicit HW support. */
	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)
		sh->dev_cap.cqe_comp = 1;
	DRV_LOG(DEBUG, "Rx CQE 128B compression is %ssupported.",
		sh->dev_cap.cqe_comp ? "" : "not ");
#else
	sh->dev_cap.cqe_comp = 1;
#endif
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	/* MPLS is enabled only when both GRE and UDP encapsulations work. */
	sh->dev_cap.mpls_en =
		((dv_attr.tunnel_offloads_caps &
		  MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
		 (dv_attr.tunnel_offloads_caps &
		  MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported.",
		sh->dev_cap.mpls_en ? "" : "not ");
#else
	DRV_LOG(WARNING,
		"MPLS over GRE/UDP tunnel offloading disabled due to old OFED/rdma-core version or firmware configuration");
#endif
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
	sh->dev_cap.hw_padding = !!attr_ex.rx_pad_end_addr_align;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
	sh->dev_cap.hw_padding = !!(attr_ex.device_cap_flags_ex &
				    IBV_DEVICE_PCI_WRITE_END_PADDING);
#endif
	sh->dev_cap.hw_csum =
		!!(attr_ex.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
	DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
		sh->dev_cap.hw_csum ? "" : "not ");
	sh->dev_cap.hw_vlan_strip = !!(attr_ex.raw_packet_caps &
				       IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
	DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
		(sh->dev_cap.hw_vlan_strip ? "" : "not "));
	sh->dev_cap.hw_fcs_strip = !!(attr_ex.raw_packet_caps &
				      IBV_RAW_PACKET_CAP_SCATTER_FCS);
#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	DRV_LOG(DEBUG, "Counters are not supported.");
#endif
	/*
	 * DPDK doesn't support larger/variable indirection tables.
	 * Once DPDK supports it, take max size from device attr.
	 */
	sh->dev_cap.ind_table_max_size =
		RTE_MIN(attr_ex.rss_caps.max_rwq_indirection_table_size,
			(unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
	DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
		sh->dev_cap.ind_table_max_size);
	/* TSO requires support on raw-packet (Ethernet) QPs specifically. */
	sh->dev_cap.tso = (attr_ex.tso_caps.max_tso > 0 &&
			   (attr_ex.tso_caps.supported_qpts &
			    (1 << IBV_QPT_RAW_PACKET)));
	if (sh->dev_cap.tso)
		sh->dev_cap.tso_max_payload_sz = attr_ex.tso_caps.max_tso;
	strlcpy(sh->dev_cap.fw_ver, attr_ex.orig_attr.fw_ver,
		sizeof(sh->dev_cap.fw_ver));
#ifdef HAVE_IBV_MLX5_MOD_SWP
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
		sh->dev_cap.swp = dv_attr.sw_parsing_caps.sw_parsing_offloads &
				  (MLX5_SW_PARSING_CAP |
				   MLX5_SW_PARSING_CSUM_CAP |
				   MLX5_SW_PARSING_TSO_CAP);
	/* swp stays 0 (from the memset above) when the mask bit is absent. */
	DRV_LOG(DEBUG, "SWP support: %u", sh->dev_cap.swp);
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
		struct mlx5dv_striding_rq_caps *strd_rq_caps =
				&dv_attr.striding_rq_caps;

		sh->dev_cap.mprq.enabled = 1;
		sh->dev_cap.mprq.log_min_stride_size =
			strd_rq_caps->min_single_stride_log_num_of_bytes;
		sh->dev_cap.mprq.log_max_stride_size =
			strd_rq_caps->max_single_stride_log_num_of_bytes;
		sh->dev_cap.mprq.log_min_stride_num =
			strd_rq_caps->min_single_wqe_log_num_of_strides;
		sh->dev_cap.mprq.log_max_stride_num =
			strd_rq_caps->max_single_wqe_log_num_of_strides;
		/* Prefer the FW-reported minimum when DevX is available. */
		sh->dev_cap.mprq.log_min_stride_wqe_size =
					cdev->config.devx ?
					hca_attr->log_min_stride_wqe_sz :
					MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %u",
			sh->dev_cap.mprq.log_min_stride_size);
		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %u",
			sh->dev_cap.mprq.log_max_stride_size);
		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %u",
			sh->dev_cap.mprq.log_min_stride_num);
		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %u",
			sh->dev_cap.mprq.log_max_stride_num);
		DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %u",
			sh->dev_cap.mprq.log_min_stride_wqe_size);
		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
			strd_rq_caps->supported_qpts);
		DRV_LOG(DEBUG, "Device supports Multi-Packet RQ.");
	}
#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
		sh->dev_cap.tunnel_en = dv_attr.tunnel_offloads_caps &
					(MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
					 MLX5_TUNNELED_OFFLOADS_GRE_CAP |
					 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
	}
	if (sh->dev_cap.tunnel_en) {
		DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
			sh->dev_cap.tunnel_en &
			MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "");
	} else {
		DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
	}
#else
	DRV_LOG(WARNING,
		"Tunnel offloading disabled due to old OFED/rdma-core version");
#endif
	/* Everything below needs DevX command support. */
	if (!sh->cdev->config.devx)
		return 0;
	/* Check capabilities for Packet Pacing. */
	DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz.",
		hca_attr->dev_freq_khz);
	DRV_LOG(DEBUG, "Packet pacing is %ssupported.",
		hca_attr->qos.packet_pacing ? "" : "not ");
	DRV_LOG(DEBUG, "Cross channel ops are %ssupported.",
		hca_attr->cross_channel ? "" : "not ");
	DRV_LOG(DEBUG, "WQE index ignore is %ssupported.",
		hca_attr->wqe_index_ignore ? "" : "not ");
	DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported.",
		hca_attr->non_wire_sq ? "" : "not ");
	DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
		hca_attr->log_max_static_sq_wq ? "" : "not ",
		hca_attr->log_max_static_sq_wq);
	DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported.",
		hca_attr->qos.wqe_rate_pp ? "" : "not ");
	/*
	 * Packet pacing is enabled only when every prerequisite feature is
	 * present; each missing one below clears txpp_en with its own log.
	 */
	sh->dev_cap.txpp_en = hca_attr->qos.packet_pacing;
	if (!hca_attr->cross_channel) {
		DRV_LOG(DEBUG,
			"Cross channel operations are required for packet pacing.");
		sh->dev_cap.txpp_en = 0;
	}
	if (!hca_attr->wqe_index_ignore) {
		DRV_LOG(DEBUG,
			"WQE index ignore feature is required for packet pacing.");
		sh->dev_cap.txpp_en = 0;
	}
	if (!hca_attr->non_wire_sq) {
		DRV_LOG(DEBUG,
			"Non-wire SQ feature is required for packet pacing.");
		sh->dev_cap.txpp_en = 0;
	}
	if (!hca_attr->log_max_static_sq_wq) {
		DRV_LOG(DEBUG,
			"Static WQE SQ feature is required for packet pacing.");
		sh->dev_cap.txpp_en = 0;
	}
	if (!hca_attr->qos.wqe_rate_pp) {
		DRV_LOG(DEBUG,
			"WQE rate mode is required for packet pacing.");
		sh->dev_cap.txpp_en = 0;
	}
#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
	DRV_LOG(DEBUG,
		"DevX does not provide UAR offset, can't create queues for packet pacing.");
	sh->dev_cap.txpp_en = 0;
#endif
	sh->dev_cap.scatter_fcs_w_decap_disable =
		hca_attr->scatter_fcs_w_decap_disable;
	sh->dev_cap.rq_delay_drop_en = hca_attr->rq_delay_drop;
	mlx5_rt_timestamp_config(sh, hca_attr);
#ifdef HAVE_IBV_DEVICE_ATTR_ESW_MGR_REG_C0
	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_REG_C0) {
		sh->dev_cap.esw_info.regc_value = dv_attr.reg_c0.value;
		sh->dev_cap.esw_info.regc_mask = dv_attr.reg_c0.mask;
	}
#else
	sh->dev_cap.esw_info.regc_value = 0;
	sh->dev_cap.esw_info.regc_mask = 0;
#endif
	return 0;
}
391 : :
/**
 * Detect misc5 support or not
 *
 * Builds a throwaway matcher mask and tries to create a flow matcher with
 * the MISC5 criteria bit set. Success means rdma-core supports matching on
 * fte_match_set_misc5 (tunnel header words) and sets sh->misc5_cap = 1.
 * All intermediate objects are destroyed before returning.
 *
 * @param[in] priv
 *   Device private data pointer
 */
#ifdef HAVE_MLX5DV_DR
static void
__mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* Dummy VxLAN matcher to detect rdma-core misc5 cap
	 * Case: IPv4--->UDP--->VxLAN--->vni
	 */
	void *tbl;
	struct mlx5_flow_dv_match_params matcher_mask;
	void *match_m;
	void *matcher;
	void *headers_m;
	void *misc5_m;
	uint32_t *tunnel_header_m;
	struct mlx5dv_flow_matcher_attr dv_attr;

	memset(&matcher_mask, 0, sizeof(matcher_mask));
	matcher_mask.size = sizeof(matcher_mask.buf);
	match_m = matcher_mask.buf;
	headers_m = MLX5_ADDR_OF(fte_match_param, match_m, outer_headers);
	misc5_m = MLX5_ADDR_OF(fte_match_param,
			       match_m, misc_parameters_5);
	tunnel_header_m = (uint32_t *)
				MLX5_ADDR_OF(fte_match_set_misc5,
				misc5_m, tunnel_header_1);
	/* Mask: IPv4 / any UDP dport / 24-bit VNI in tunnel_header_1. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
	*tunnel_header_m = 0xffffff;

	tbl = mlx5_glue->dr_create_flow_tbl(priv->sh->rx_domain, 1);
	if (!tbl) {
		DRV_LOG(INFO, "No SW steering support");
		return;
	}
	dv_attr.type = IBV_FLOW_ATTR_NORMAL;
	dv_attr.match_mask = (void *)&matcher_mask;
	dv_attr.match_criteria_enable =
			(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
			(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT);
	dv_attr.priority = 3;
#ifdef HAVE_MLX5DV_DR_ESWITCH
	void *misc2_m;
	if (priv->sh->config.dv_esw_en) {
		/* FDB enabled reg_c_0 */
		dv_attr.match_criteria_enable |=
				(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
		misc2_m = MLX5_ADDR_OF(fte_match_param,
				       match_m, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2_m,
			 metadata_reg_c_0, 0xffff);
	}
#endif
	/* Matcher creation succeeding is itself the capability probe. */
	matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->cdev->ctx,
						    &dv_attr, tbl);
	if (matcher) {
		priv->sh->misc5_cap = 1;
		mlx5_glue->dv_destroy_flow_matcher(matcher);
	}
	mlx5_glue->dr_destroy_flow_tbl(tbl);
#else
	RTE_SET_USED(priv);
#endif
}
#endif
464 : :
465 : : /**
466 : : * Initialize DR related data within private structure.
467 : : * Routine checks the reference counter and does actual
468 : : * resources creation/initialization only if counter is zero.
469 : : *
470 : : * @param[in] eth_dev
471 : : * Pointer to the device.
472 : : *
473 : : * @return
474 : : * Zero on success, positive error code otherwise.
475 : : */
476 : : static int
477 : 0 : mlx5_alloc_shared_dr(struct rte_eth_dev *eth_dev)
478 : : {
479 : 0 : struct mlx5_priv *priv = eth_dev->data->dev_private;
480 : 0 : struct mlx5_dev_ctx_shared *sh = priv->sh;
481 : : char s[MLX5_NAME_SIZE] __rte_unused;
482 : : int err;
483 : :
484 : : MLX5_ASSERT(sh && sh->refcnt);
485 [ # # ]: 0 : if (sh->refcnt > 1)
486 : : return 0;
487 : 0 : err = mlx5_alloc_table_hash_list(priv);
488 [ # # ]: 0 : if (err)
489 : 0 : goto error;
490 : 0 : sh->default_miss_action =
491 : 0 : mlx5_glue->dr_create_flow_action_default_miss();
492 [ # # ]: 0 : if (!sh->default_miss_action)
493 : 0 : DRV_LOG(WARNING, "Default miss action is not supported.");
494 : : /* The resources below are only valid with DV support. */
495 : : #ifdef HAVE_IBV_FLOW_DV_SUPPORT
496 : : /* Init shared flex parsers list, no need lcore_share */
497 : 0 : snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
498 : 0 : sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
499 : : mlx5_flex_parser_create_cb,
500 : : mlx5_flex_parser_match_cb,
501 : : mlx5_flex_parser_remove_cb,
502 : : mlx5_flex_parser_clone_cb,
503 : : mlx5_flex_parser_clone_free_cb);
504 [ # # ]: 0 : if (!sh->flex_parsers_dv)
505 : 0 : goto error;
506 [ # # ]: 0 : if (priv->sh->config.dv_flow_en == 2) {
507 [ # # ]: 0 : if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
508 [ # # ]: 0 : sh->dv_regc0_mask) {
509 : : /* Reuse DV callback functions. */
510 : 0 : sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
511 : : MLX5_FLOW_MREG_HTABLE_SZ,
512 : : false, true, eth_dev,
513 : : mlx5_flow_nta_mreg_create_cb,
514 : : mlx5_flow_dv_mreg_match_cb,
515 : : mlx5_flow_nta_mreg_remove_cb,
516 : : mlx5_flow_dv_mreg_clone_cb,
517 : : mlx5_flow_dv_mreg_clone_free_cb);
518 [ # # ]: 0 : if (!sh->mreg_cp_tbl) {
519 : : err = ENOMEM;
520 : 0 : goto error;
521 : : }
522 : : }
523 : 0 : return 0;
524 : : }
525 : : /* Init port id action list. */
526 : : snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
527 : 0 : sh->port_id_action_list = mlx5_list_create(s, sh, true,
528 : : mlx5_flow_dv_port_id_create_cb,
529 : : mlx5_flow_dv_port_id_match_cb,
530 : : mlx5_flow_dv_port_id_remove_cb,
531 : : mlx5_flow_dv_port_id_clone_cb,
532 : : mlx5_flow_dv_port_id_clone_free_cb);
533 [ # # ]: 0 : if (!sh->port_id_action_list)
534 : 0 : goto error;
535 : : /* Init push vlan action list. */
536 : : snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
537 : 0 : sh->push_vlan_action_list = mlx5_list_create(s, sh, true,
538 : : mlx5_flow_dv_push_vlan_create_cb,
539 : : mlx5_flow_dv_push_vlan_match_cb,
540 : : mlx5_flow_dv_push_vlan_remove_cb,
541 : : mlx5_flow_dv_push_vlan_clone_cb,
542 : : mlx5_flow_dv_push_vlan_clone_free_cb);
543 [ # # ]: 0 : if (!sh->push_vlan_action_list)
544 : 0 : goto error;
545 : : /* Init sample action list. */
546 : : snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
547 : 0 : sh->sample_action_list = mlx5_list_create(s, sh, true,
548 : : mlx5_flow_dv_sample_create_cb,
549 : : mlx5_flow_dv_sample_match_cb,
550 : : mlx5_flow_dv_sample_remove_cb,
551 : : mlx5_flow_dv_sample_clone_cb,
552 : : mlx5_flow_dv_sample_clone_free_cb);
553 [ # # ]: 0 : if (!sh->sample_action_list)
554 : 0 : goto error;
555 : : /* Init dest array action list. */
556 : : snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
557 : 0 : sh->dest_array_list = mlx5_list_create(s, sh, true,
558 : : mlx5_flow_dv_dest_array_create_cb,
559 : : mlx5_flow_dv_dest_array_match_cb,
560 : : mlx5_flow_dv_dest_array_remove_cb,
561 : : mlx5_flow_dv_dest_array_clone_cb,
562 : : mlx5_flow_dv_dest_array_clone_free_cb);
563 [ # # ]: 0 : if (!sh->dest_array_list)
564 : 0 : goto error;
565 : : #else
566 : : if (priv->sh->config.dv_flow_en == 2)
567 : : return 0;
568 : : #endif
569 : : #ifdef HAVE_MLX5DV_DR
570 : : void *domain;
571 : :
572 : : /* Reference counter is zero, we should initialize structures. */
573 : 0 : domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
574 : : MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
575 [ # # ]: 0 : if (!domain) {
576 : 0 : DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
577 : 0 : err = errno;
578 : 0 : goto error;
579 : : }
580 : 0 : sh->rx_domain = domain;
581 : 0 : domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
582 : : MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
583 [ # # ]: 0 : if (!domain) {
584 : 0 : DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
585 : 0 : err = errno;
586 : 0 : goto error;
587 : : }
588 : 0 : sh->tx_domain = domain;
589 : : #ifdef HAVE_MLX5DV_DR_ESWITCH
590 [ # # ]: 0 : if (sh->config.dv_esw_en) {
591 : 0 : domain = mlx5_glue->dr_create_domain(sh->cdev->ctx,
592 : : MLX5DV_DR_DOMAIN_TYPE_FDB);
593 [ # # ]: 0 : if (!domain) {
594 : 0 : DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
595 : 0 : err = errno;
596 : 0 : goto error;
597 : : }
598 : 0 : sh->fdb_domain = domain;
599 : : }
600 : : /*
601 : : * The drop action is just some dummy placeholder in rdma-core. It
602 : : * does not belong to domains and has no any attributes, and, can be
603 : : * shared by the entire device.
604 : : */
605 : 0 : sh->dr_drop_action = mlx5_glue->dr_create_flow_action_drop();
606 [ # # ]: 0 : if (!sh->dr_drop_action) {
607 : 0 : DRV_LOG(ERR, "FDB mlx5dv_dr_create_flow_action_drop");
608 : 0 : err = errno;
609 : 0 : goto error;
610 : : }
611 : :
612 [ # # ]: 0 : if (sh->config.dv_flow_en == 1) {
613 : : /* Query availability of metadata reg_c's. */
614 [ # # ]: 0 : if (!priv->sh->metadata_regc_check_flag) {
615 : 0 : err = mlx5_flow_discover_mreg_c(eth_dev);
616 [ # # ]: 0 : if (err < 0) {
617 : 0 : err = -err;
618 : 0 : goto error;
619 : : }
620 : : }
621 [ # # ]: 0 : if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
622 : 0 : DRV_LOG(DEBUG,
623 : : "port %u extensive metadata register is not supported",
624 : : eth_dev->data->port_id);
625 [ # # ]: 0 : if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
626 : 0 : DRV_LOG(ERR, "metadata mode %u is not supported "
627 : : "(no metadata registers available)",
628 : : sh->config.dv_xmeta_en);
629 : : err = ENOTSUP;
630 : 0 : goto error;
631 : : }
632 : : }
633 [ # # # # ]: 0 : if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
634 [ # # ]: 0 : mlx5_flow_ext_mreg_supported(eth_dev) && sh->dv_regc0_mask) {
635 : 0 : sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
636 : : MLX5_FLOW_MREG_HTABLE_SZ,
637 : : false, true, eth_dev,
638 : : mlx5_flow_dv_mreg_create_cb,
639 : : mlx5_flow_dv_mreg_match_cb,
640 : : mlx5_flow_dv_mreg_remove_cb,
641 : : mlx5_flow_dv_mreg_clone_cb,
642 : : mlx5_flow_dv_mreg_clone_free_cb);
643 [ # # ]: 0 : if (!sh->mreg_cp_tbl) {
644 : : err = ENOMEM;
645 : 0 : goto error;
646 : : }
647 : : }
648 : : }
649 : : #endif
650 [ # # # # ]: 0 : if (!sh->tunnel_hub && sh->config.dv_miss_info)
651 : 0 : err = mlx5_alloc_tunnel_hub(sh);
652 [ # # ]: 0 : if (err) {
653 : 0 : DRV_LOG(ERR, "mlx5_alloc_tunnel_hub failed err=%d", err);
654 : 0 : goto error;
655 : : }
656 [ # # ]: 0 : if (sh->config.reclaim_mode == MLX5_RCM_AGGR) {
657 : 0 : mlx5_glue->dr_reclaim_domain_memory(sh->rx_domain, 1);
658 : 0 : mlx5_glue->dr_reclaim_domain_memory(sh->tx_domain, 1);
659 [ # # ]: 0 : if (sh->fdb_domain)
660 : 0 : mlx5_glue->dr_reclaim_domain_memory(sh->fdb_domain, 1);
661 : : }
662 : 0 : sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
663 [ # # ]: 0 : if (!sh->config.allow_duplicate_pattern) {
664 : : #ifndef HAVE_MLX5_DR_ALLOW_DUPLICATE
665 : : DRV_LOG(WARNING, "Disallow duplicate pattern is not supported - maybe old rdma-core version?");
666 : : #endif
667 : 0 : mlx5_glue->dr_allow_duplicate_rules(sh->rx_domain, 0);
668 : 0 : mlx5_glue->dr_allow_duplicate_rules(sh->tx_domain, 0);
669 [ # # ]: 0 : if (sh->fdb_domain)
670 : 0 : mlx5_glue->dr_allow_duplicate_rules(sh->fdb_domain, 0);
671 : : }
672 : :
673 : 0 : __mlx5_discovery_misc5_cap(priv);
674 : : #endif /* HAVE_MLX5DV_DR */
675 : 0 : LIST_INIT(&sh->shared_rxqs);
676 : 0 : return 0;
677 : 0 : error:
678 : : /* Rollback the created objects. */
679 [ # # ]: 0 : if (sh->rx_domain) {
680 : 0 : mlx5_glue->dr_destroy_domain(sh->rx_domain);
681 : 0 : sh->rx_domain = NULL;
682 : : }
683 [ # # ]: 0 : if (sh->tx_domain) {
684 : 0 : mlx5_glue->dr_destroy_domain(sh->tx_domain);
685 : 0 : sh->tx_domain = NULL;
686 : : }
687 [ # # ]: 0 : if (sh->fdb_domain) {
688 : 0 : mlx5_glue->dr_destroy_domain(sh->fdb_domain);
689 : 0 : sh->fdb_domain = NULL;
690 : : }
691 [ # # ]: 0 : if (sh->dr_drop_action) {
692 : 0 : mlx5_glue->destroy_flow_action(sh->dr_drop_action);
693 : 0 : sh->dr_drop_action = NULL;
694 : : }
695 [ # # ]: 0 : if (sh->pop_vlan_action) {
696 : 0 : mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
697 : 0 : sh->pop_vlan_action = NULL;
698 : : }
699 [ # # ]: 0 : if (sh->encaps_decaps) {
700 : 0 : mlx5_hlist_destroy(sh->encaps_decaps);
701 : 0 : sh->encaps_decaps = NULL;
702 : : }
703 [ # # ]: 0 : if (sh->modify_cmds) {
704 : 0 : mlx5_hlist_destroy(sh->modify_cmds);
705 : 0 : sh->modify_cmds = NULL;
706 : : }
707 [ # # ]: 0 : if (sh->tag_table) {
708 : : /* tags should be destroyed with flow before. */
709 : 0 : mlx5_hlist_destroy(sh->tag_table);
710 : 0 : sh->tag_table = NULL;
711 : : }
712 [ # # ]: 0 : if (sh->tunnel_hub) {
713 : 0 : mlx5_release_tunnel_hub(sh, priv->dev_port);
714 : 0 : sh->tunnel_hub = NULL;
715 : : }
716 : 0 : mlx5_free_table_hash_list(priv);
717 [ # # ]: 0 : if (sh->port_id_action_list) {
718 : 0 : mlx5_list_destroy(sh->port_id_action_list);
719 : 0 : sh->port_id_action_list = NULL;
720 : : }
721 [ # # ]: 0 : if (sh->push_vlan_action_list) {
722 : 0 : mlx5_list_destroy(sh->push_vlan_action_list);
723 : 0 : sh->push_vlan_action_list = NULL;
724 : : }
725 [ # # ]: 0 : if (sh->sample_action_list) {
726 : 0 : mlx5_list_destroy(sh->sample_action_list);
727 : 0 : sh->sample_action_list = NULL;
728 : : }
729 [ # # ]: 0 : if (sh->dest_array_list) {
730 : 0 : mlx5_list_destroy(sh->dest_array_list);
731 : 0 : sh->dest_array_list = NULL;
732 : : }
733 [ # # ]: 0 : if (sh->mreg_cp_tbl) {
734 : 0 : mlx5_hlist_destroy(sh->mreg_cp_tbl);
735 : 0 : sh->mreg_cp_tbl = NULL;
736 : : }
737 : : return err;
738 : : }
739 : :
#ifdef HAVE_MLX5DV_DR
/*
 * Release the cached "send to kernel" flow action and its table for every
 * table type, resetting each slot to NULL.
 */
static void
mlx5_destroy_send_to_kernel_action(struct mlx5_dev_ctx_shared *sh)
{
	int idx;

	for (idx = 0; idx < MLX5DR_TABLE_TYPE_MAX; idx++) {
		void *act = sh->send_to_kernel_action[idx].action;
		struct mlx5_flow_tbl_resource *tbl =
				sh->send_to_kernel_action[idx].tbl;

		if (act != NULL) {
			mlx5_glue->destroy_flow_action(act);
			sh->send_to_kernel_action[idx].action = NULL;
		}
		if (tbl != NULL) {
			mlx5_flow_dv_tbl_resource_release(sh, tbl);
			sh->send_to_kernel_action[idx].tbl = NULL;
		}
	}
}
#endif /* HAVE_MLX5DV_DR */
763 : :
764 : : /**
765 : : * Destroy DR related data within private structure.
766 : : *
767 : : * @param[in] priv
768 : : * Pointer to the private device data structure.
769 : : */
770 : : void
771 : 0 : mlx5_os_free_shared_dr(struct mlx5_priv *priv)
772 : : {
773 : 0 : struct mlx5_dev_ctx_shared *sh = priv->sh;
774 : : struct mlx5_rxq_ctrl *rxq_ctrl;
775 : : int i = 0;
776 : :
777 : : MLX5_ASSERT(sh && sh->refcnt);
778 [ # # ]: 0 : if (sh->refcnt > 1)
779 : : return;
780 [ # # ]: 0 : LIST_FOREACH(rxq_ctrl, &sh->shared_rxqs, next) {
781 : 0 : DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
782 : : priv->dev_data->port_id, rxq_ctrl->rxq.idx);
783 : 0 : ++i;
784 : : }
785 [ # # ]: 0 : if (i > 0)
786 : 0 : DRV_LOG(WARNING, "port %u some Rx queues still remain %d",
787 : : priv->dev_data->port_id, i);
788 : : MLX5_ASSERT(LIST_EMPTY(&sh->shared_rxqs));
789 : : #ifdef HAVE_MLX5DV_DR
790 : 0 : mlx5_destroy_send_to_kernel_action(sh);
791 [ # # ]: 0 : if (sh->rx_domain) {
792 : 0 : mlx5_glue->dr_destroy_domain(sh->rx_domain);
793 : 0 : sh->rx_domain = NULL;
794 : : }
795 [ # # ]: 0 : if (sh->tx_domain) {
796 : 0 : mlx5_glue->dr_destroy_domain(sh->tx_domain);
797 : 0 : sh->tx_domain = NULL;
798 : : }
799 : : #ifdef HAVE_MLX5DV_DR_ESWITCH
800 [ # # ]: 0 : if (sh->fdb_domain) {
801 : 0 : mlx5_glue->dr_destroy_domain(sh->fdb_domain);
802 : 0 : sh->fdb_domain = NULL;
803 : : }
804 [ # # ]: 0 : if (sh->dr_drop_action) {
805 : 0 : mlx5_glue->destroy_flow_action(sh->dr_drop_action);
806 : 0 : sh->dr_drop_action = NULL;
807 : : }
808 : : #endif
809 [ # # ]: 0 : if (sh->pop_vlan_action) {
810 : 0 : mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
811 : 0 : sh->pop_vlan_action = NULL;
812 : : }
813 [ # # ]: 0 : for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
814 [ # # ]: 0 : if (sh->send_to_kernel_action[i].action) {
815 : : void *action = sh->send_to_kernel_action[i].action;
816 : :
817 : 0 : mlx5_glue->destroy_flow_action(action);
818 : 0 : sh->send_to_kernel_action[i].action = NULL;
819 : : }
820 [ # # ]: 0 : if (sh->send_to_kernel_action[i].tbl) {
821 : : struct mlx5_flow_tbl_resource *tbl =
822 : : sh->send_to_kernel_action[i].tbl;
823 : :
824 : 0 : mlx5_flow_dv_tbl_resource_release(sh, tbl);
825 : 0 : sh->send_to_kernel_action[i].tbl = NULL;
826 : : }
827 : : }
828 : : #endif /* HAVE_MLX5DV_DR */
829 [ # # ]: 0 : if (sh->default_miss_action)
830 : 0 : mlx5_glue->destroy_flow_action
831 : : (sh->default_miss_action);
832 [ # # ]: 0 : if (sh->encaps_decaps) {
833 : 0 : mlx5_hlist_destroy(sh->encaps_decaps);
834 : 0 : sh->encaps_decaps = NULL;
835 : : }
836 [ # # ]: 0 : if (sh->modify_cmds) {
837 : 0 : mlx5_hlist_destroy(sh->modify_cmds);
838 : 0 : sh->modify_cmds = NULL;
839 : : }
840 [ # # ]: 0 : if (sh->tag_table) {
841 : : /* tags should be destroyed with flow before. */
842 : 0 : mlx5_hlist_destroy(sh->tag_table);
843 : 0 : sh->tag_table = NULL;
844 : : }
845 [ # # ]: 0 : if (sh->tunnel_hub) {
846 : 0 : mlx5_release_tunnel_hub(sh, priv->dev_port);
847 : 0 : sh->tunnel_hub = NULL;
848 : : }
849 : 0 : mlx5_free_table_hash_list(priv);
850 [ # # ]: 0 : if (sh->port_id_action_list) {
851 : 0 : mlx5_list_destroy(sh->port_id_action_list);
852 : 0 : sh->port_id_action_list = NULL;
853 : : }
854 [ # # ]: 0 : if (sh->push_vlan_action_list) {
855 : 0 : mlx5_list_destroy(sh->push_vlan_action_list);
856 : 0 : sh->push_vlan_action_list = NULL;
857 : : }
858 [ # # ]: 0 : if (sh->sample_action_list) {
859 : 0 : mlx5_list_destroy(sh->sample_action_list);
860 : 0 : sh->sample_action_list = NULL;
861 : : }
862 [ # # ]: 0 : if (sh->dest_array_list) {
863 : 0 : mlx5_list_destroy(sh->dest_array_list);
864 : 0 : sh->dest_array_list = NULL;
865 : : }
866 [ # # ]: 0 : if (sh->mreg_cp_tbl) {
867 : 0 : mlx5_hlist_destroy(sh->mreg_cp_tbl);
868 : 0 : sh->mreg_cp_tbl = NULL;
869 : : }
870 : : }
871 : :
872 : : /**
873 : : * Initialize shared data between primary and secondary process.
874 : : *
875 : : * A memzone is reserved by primary process and secondary processes attach to
876 : : * the memzone.
877 : : *
878 : : * @return
879 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
880 : : */
881 : : static int
882 : 0 : mlx5_init_shared_data(void)
883 : : {
884 : : const struct rte_memzone *mz;
885 : : int ret = 0;
886 : :
887 : : rte_spinlock_lock(&mlx5_shared_data_lock);
888 [ # # ]: 0 : if (mlx5_shared_data == NULL) {
889 [ # # ]: 0 : if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
890 : : /* Allocate shared memory. */
891 : 0 : mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
892 : : sizeof(*mlx5_shared_data),
893 : : SOCKET_ID_ANY, 0);
894 [ # # ]: 0 : if (mz == NULL) {
895 : 0 : DRV_LOG(ERR,
896 : : "Cannot allocate mlx5 shared data");
897 : 0 : ret = -rte_errno;
898 : 0 : goto error;
899 : : }
900 : 0 : mlx5_shared_data = mz->addr;
901 : : memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
902 : 0 : rte_spinlock_init(&mlx5_shared_data->lock);
903 : : } else {
904 : : /* Lookup allocated shared memory. */
905 : 0 : mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
906 [ # # ]: 0 : if (mz == NULL) {
907 : 0 : DRV_LOG(ERR,
908 : : "Cannot attach mlx5 shared data");
909 : 0 : ret = -rte_errno;
910 : 0 : goto error;
911 : : }
912 : 0 : mlx5_shared_data = mz->addr;
913 : : memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
914 : : }
915 : : }
916 : 0 : error:
917 : : rte_spinlock_unlock(&mlx5_shared_data_lock);
918 : 0 : return ret;
919 : : }
920 : :
921 : : /**
922 : : * PMD global initialization.
923 : : *
924 : : * Independent from individual device, this function initializes global
925 : : * per-PMD data structures distinguishing primary and secondary processes.
926 : : * Hence, each initialization is called once per a process.
927 : : *
928 : : * @return
929 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
930 : : */
931 : : static int
932 : 0 : mlx5_init_once(void)
933 : : {
934 : : struct mlx5_shared_data *sd;
935 : : struct mlx5_local_data *ld = &mlx5_local_data;
936 : : int ret = 0;
937 : :
938 [ # # ]: 0 : if (mlx5_init_shared_data())
939 : 0 : return -rte_errno;
940 : 0 : sd = mlx5_shared_data;
941 : : MLX5_ASSERT(sd);
942 : 0 : rte_spinlock_lock(&sd->lock);
943 [ # # # ]: 0 : switch (rte_eal_process_type()) {
944 : 0 : case RTE_PROC_PRIMARY:
945 [ # # ]: 0 : if (sd->init_done)
946 : : break;
947 : 0 : ret = mlx5_mp_init_primary(MLX5_MP_NAME,
948 : : mlx5_mp_os_primary_handle);
949 [ # # ]: 0 : if (ret)
950 : 0 : goto out;
951 : 0 : sd->init_done = true;
952 : 0 : break;
953 : 0 : case RTE_PROC_SECONDARY:
954 [ # # ]: 0 : if (ld->init_done)
955 : : break;
956 : 0 : ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
957 : : mlx5_mp_os_secondary_handle);
958 [ # # ]: 0 : if (ret)
959 : 0 : goto out;
960 : 0 : ++sd->secondary_cnt;
961 : 0 : ld->init_done = true;
962 : 0 : break;
963 : : default:
964 : : break;
965 : : }
966 : 0 : out:
967 : : rte_spinlock_unlock(&sd->lock);
968 : 0 : return ret;
969 : : }
970 : :
971 : : /**
972 : : * DR flow drop action support detect.
973 : : *
974 : : * @param dev
975 : : * Pointer to rte_eth_dev structure.
976 : : *
977 : : */
978 : : static void
979 : 0 : mlx5_flow_drop_action_config(struct rte_eth_dev *dev __rte_unused)
980 : : {
981 : : #ifdef HAVE_MLX5DV_DR
982 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
983 : :
984 [ # # # # ]: 0 : if (!priv->sh->config.dv_flow_en || !priv->sh->dr_drop_action)
985 : : return;
986 : : /**
987 : : * DR supports drop action placeholder when it is supported;
988 : : * otherwise, use the queue drop action.
989 : : */
990 [ # # ]: 0 : if (!priv->sh->drop_action_check_flag) {
991 [ # # ]: 0 : if (!mlx5_flow_discover_dr_action_support(dev))
992 : 0 : priv->sh->dr_root_drop_action_en = 1;
993 : 0 : priv->sh->drop_action_check_flag = 1;
994 : : }
995 [ # # ]: 0 : if (priv->sh->dr_root_drop_action_en)
996 : 0 : priv->root_drop_action = priv->sh->dr_drop_action;
997 : : else
998 : 0 : priv->root_drop_action = priv->drop_queue.hrxq->action;
999 : : #endif
1000 : : }
1001 : :
/*
 * Prepare a queue counter ID for the port.
 *
 * Tries to allocate a dedicated DevX queue counter first. On failure, falls
 * back to the kernel driver's global queue counter: a throw-away Verbs CQ/WQ
 * pair is created, the WQ is moved to RDY state (the counter is assigned only
 * in that state), its counter ID is queried, and both objects are destroyed.
 * If no counter could be obtained, counter_set_id stays 0 and part of the
 * port statistics will be unavailable.
 */
static void
mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	void *ctx = priv->sh->cdev->ctx;

	/* Preferred path: per-port DevX queue counter object. */
	priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx, NULL);
	if (!priv->q_counters) {
		struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
		struct ibv_wq *wq;

		DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
			"by DevX - fall-back to use the kernel driver global "
			"queue counter.", dev->data->port_id);

		/* Create WQ by kernel and query its queue counter ID. */
		if (cq) {
			wq = mlx5_glue->create_wq(ctx,
				&(struct ibv_wq_init_attr){
					.wq_type = IBV_WQT_RQ,
					.max_wr = 1,
					.max_sge = 1,
					.pd = priv->sh->cdev->pd,
					.cq = cq,
				});
			if (wq) {
				/* Counter is assigned only on RDY state. */
				int ret = mlx5_glue->modify_wq(wq,
					&(struct ibv_wq_attr){
						.attr_mask = IBV_WQ_ATTR_STATE,
						.wq_state = IBV_WQS_RDY,
					});

				if (ret == 0)
					mlx5_devx_cmd_wq_query(wq,
							&priv->counter_set_id);
				/* WQ must be destroyed before its CQ. */
				claim_zero(mlx5_glue->destroy_wq(wq));
			}
			claim_zero(mlx5_glue->destroy_cq(cq));
		}
	} else {
		priv->counter_set_id = priv->q_counters->id;
	}
	if (priv->counter_set_id == 0)
		DRV_LOG(INFO, "Part of the port %d statistics will not be "
			"available.", dev->data->port_id);
}
1049 : :
1050 : : static inline bool
1051 : : mlx5_ignore_pf_representor(const struct rte_eth_devargs *eth_da)
1052 : : {
1053 : 0 : return (eth_da->flags & RTE_ETH_DEVARG_REPRESENTOR_IGNORE_PF) != 0;
1054 : : }
1055 : :
1056 : : static bool
1057 : : is_standard_eswitch(const struct mlx5_dev_spawn_data *spawn)
1058 : : {
1059 : 0 : bool is_bond = spawn->pf_bond >= 0;
1060 : :
1061 [ # # # # : 0 : return !is_bond && spawn->nb_uplinks <= 1 && spawn->nb_hpfs <= 1;
# # # # #
# ]
1062 : : }
1063 : :
1064 : : static bool
1065 : : is_hpf(const struct mlx5_dev_spawn_data *spawn)
1066 : : {
1067 [ # # # # : 0 : return spawn->info.port_name == -1 &&
# # # # ]
1068 [ # # # # ]: 0 : spawn->info.name_type == MLX5_PHYS_PORT_NAME_TYPE_PFHPF;
1069 : : }
1070 : :
1071 : : static int
1072 : 0 : build_port_name(struct rte_device *dpdk_dev,
1073 : : struct mlx5_dev_spawn_data *spawn,
1074 : : char *name,
1075 : : size_t name_sz)
1076 : : {
1077 : 0 : bool is_bond = spawn->pf_bond >= 0;
1078 : : int written = 0;
1079 : : int ret;
1080 : :
1081 [ # # ]: 0 : ret = snprintf(name, name_sz, "%s", dpdk_dev->name);
1082 [ # # ]: 0 : if (ret < 0)
1083 : : return ret;
1084 : : written += ret;
1085 [ # # ]: 0 : if (written >= (int)name_sz)
1086 : : return written;
1087 : :
1088 : : /*
1089 : : * Whenever bond device is detected, include IB device name.
1090 : : * This is kept to keep port naming backward compatible.
1091 : : */
1092 [ # # ]: 0 : if (is_bond) {
1093 [ # # ]: 0 : ret = snprintf(name + written, name_sz - written, "_%s", spawn->phys_dev_name);
1094 [ # # ]: 0 : if (ret < 0)
1095 : : return ret;
1096 : 0 : written += ret;
1097 [ # # ]: 0 : if (written >= (int)name_sz)
1098 : : return written;
1099 : : }
1100 : :
1101 [ # # ]: 0 : if (spawn->info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
1102 : : /* Add port to name if and only if there is more than one uplink. */
1103 [ # # ]: 0 : if (spawn->nb_uplinks <= 1)
1104 : 0 : goto end;
1105 : :
1106 [ # # ]: 0 : ret = snprintf(name + written, name_sz - written, "_p%u", spawn->info.port_name);
1107 [ # # ]: 0 : if (ret < 0)
1108 : : return ret;
1109 : 0 : written += ret;
1110 : : if (written >= (int)name_sz)
1111 : : return written;
1112 [ # # ]: 0 : } else if (spawn->info.representor) {
1113 : : /*
1114 : : * If port is a representor, then switchdev has been enabled.
1115 : : * In that case add controller, PF and VF/SF indexes to port name
1116 : : * if at least one of these conditions are met:
1117 : : * 1. Device is a bond (VF-LAG).
1118 : : * 2. There are multiple uplinks (MPESW).
1119 : : * 3. There are multiple host PFs (BlueField socket direct).
1120 : : *
1121 : : * If none of these conditions apply, then it is assumed that
1122 : : * this device manages a single non-shared E-Switch with single controller,
1123 : : * where there is only one uplink/PF and one host PF (on BlueField).
1124 : : */
1125 [ # # ]: 0 : if (!is_standard_eswitch(spawn))
1126 [ # # ]: 0 : ret = snprintf(name + written, name_sz - written,
1127 : : "_representor_c%dpf%d%s%u",
1128 : : spawn->info.ctrl_num,
1129 : : spawn->info.pf_num,
1130 : : spawn->info.name_type ==
1131 : : MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
1132 : : spawn->info.port_name);
1133 : : else
1134 [ # # ]: 0 : ret = snprintf(name + written, name_sz - written, "_representor_%s%u",
1135 : : spawn->info.name_type ==
1136 : : MLX5_PHYS_PORT_NAME_TYPE_PFSF ? "sf" : "vf",
1137 : : spawn->info.port_name);
1138 [ # # ]: 0 : if (ret < 0)
1139 : : return ret;
1140 : 0 : written += ret;
1141 : : if (written >= (int)name_sz)
1142 : : return written;
1143 : : }
1144 : :
1145 : 0 : end:
1146 : : return written;
1147 : : }
1148 : :
1149 : : static bool
1150 : : representor_match_uplink(const struct mlx5_dev_spawn_data *spawn,
1151 : : uint16_t port_name,
1152 : : const struct rte_eth_devargs *eth_da,
1153 : : uint16_t eth_da_pf_num)
1154 : : {
1155 : 0 : if (spawn->info.name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
1156 : : return false;
1157 : : /* One of the uplinks will be a transfer proxy. Must be probed always. */
1158 [ # # # # ]: 0 : if (spawn->info.master)
1159 : : return true;
1160 [ # # # # ]: 0 : if (mlx5_ignore_pf_representor(eth_da))
1161 : : return false;
1162 : :
1163 : 0 : return port_name == eth_da_pf_num;
1164 : : }
1165 : :
/*
 * Check whether the spawn data matches any PF / representor-port combination
 * requested in the devargs.
 *
 * Returns true on the first match, false when nothing in @eth_da selects this
 * port.
 */
static bool
representor_match_port(const struct mlx5_dev_spawn_data *spawn,
		       const struct rte_eth_devargs *eth_da)
{
	for (uint16_t p = 0; p < eth_da->nb_ports; ++p) {
		uint16_t pf_num = eth_da->ports[p];

		/* PF representor in devargs is interpreted as probing uplink port. */
		if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
			if (representor_match_uplink(spawn, spawn->info.port_name, eth_da, pf_num))
				return true;

			continue;
		}

		/* Allow probing related uplink when VF/SF representor is requested. */
		if ((eth_da->type == RTE_ETH_REPRESENTOR_VF ||
		     eth_da->type == RTE_ETH_REPRESENTOR_SF) &&
		    representor_match_uplink(spawn, spawn->info.pf_num, eth_da, pf_num))
			return true;

		/*
		 * Uplink ports should not be matched against representor_ports.
		 * NOTE(review): this early return exits the whole ports loop on
		 * the first non-matching PF entry for an uplink spawn, so later
		 * eth_da->ports[] entries are never tried — confirm this is
		 * intentional for devargs listing multiple PF indexes.
		 */
		if (spawn->info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
			return false;

		for (uint16_t f = 0; f < eth_da->nb_representor_ports; ++f) {
			uint16_t port_num = eth_da->representor_ports[f];
			bool pf_num_match;
			bool rep_num_match;

			/*
			 * In standard E-Switch case, allow probing VFs even if wrong PF index
			 * was provided.
			 */
			if (is_standard_eswitch(spawn))
				pf_num_match = true;
			else
				pf_num_match = spawn->info.pf_num == pf_num;

			/* Host PF is indicated through VF/SF representor index == -1. */
			if (is_hpf(spawn))
				rep_num_match = port_num == UINT16_MAX;
			else
				rep_num_match = port_num == spawn->info.port_name;

			if (pf_num_match && rep_num_match)
				return true;
		}
	}

	return false;
}
1218 : :
1219 : : /**
1220 : : * Check if representor spawn info match devargs.
1221 : : *
1222 : : * @param spawn
1223 : : * Verbs device parameters (name, port, switch_info) to spawn.
1224 : : * @param eth_da
1225 : : * Device devargs to probe.
1226 : : *
1227 : : * @return
1228 : : * Match result.
1229 : : */
1230 : : static bool
1231 : 0 : mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
1232 : : struct rte_eth_devargs *eth_da)
1233 : : {
1234 : : struct mlx5_switch_info *switch_info = &spawn->info;
1235 : : unsigned int c;
1236 [ # # ]: 0 : bool ignore_ctrl_num = eth_da->nb_mh_controllers == 0 ||
1237 [ # # ]: 0 : switch_info->name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
1238 : :
1239 [ # # # # : 0 : switch (eth_da->type) {
# ]
1240 : 0 : case RTE_ETH_REPRESENTOR_PF:
1241 [ # # ]: 0 : if (switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
1242 : 0 : rte_errno = EBUSY;
1243 : 0 : return false;
1244 : : }
1245 : : break;
1246 : : case RTE_ETH_REPRESENTOR_SF:
1247 [ # # ]: 0 : if (!is_hpf(spawn) &&
1248 [ # # # # ]: 0 : switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFSF &&
1249 : : switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
1250 : 0 : rte_errno = EBUSY;
1251 : 0 : return false;
1252 : : }
1253 : : break;
1254 : : case RTE_ETH_REPRESENTOR_VF:
1255 [ # # ]: 0 : if (!is_hpf(spawn) &&
1256 [ # # # # ]: 0 : switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_PFVF &&
1257 : : switch_info->name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
1258 : 0 : rte_errno = EBUSY;
1259 : 0 : return false;
1260 : : }
1261 : : break;
1262 : 0 : case RTE_ETH_REPRESENTOR_NONE:
1263 : 0 : rte_errno = EBUSY;
1264 : 0 : return false;
1265 : 0 : default:
1266 : 0 : rte_errno = ENOTSUP;
1267 : 0 : DRV_LOG(ERR, "unsupported representor type");
1268 : 0 : return false;
1269 : : }
1270 [ # # ]: 0 : if (!ignore_ctrl_num) {
1271 [ # # ]: 0 : for (c = 0; c < eth_da->nb_mh_controllers; ++c) {
1272 : 0 : uint16_t ctrl_num = eth_da->mh_controllers[c];
1273 : :
1274 [ # # # # ]: 0 : if (spawn->info.ctrl_num == ctrl_num &&
1275 : 0 : representor_match_port(spawn, eth_da))
1276 : : return true;
1277 : : }
1278 : : } else {
1279 [ # # ]: 0 : if (representor_match_port(spawn, eth_da))
1280 : : return true;
1281 : : }
1282 : 0 : rte_errno = EBUSY;
1283 : 0 : return false;
1284 : : }
1285 : :
1286 : : /**
1287 : : * Spawn an Ethernet device from Verbs information.
1288 : : *
1289 : : * @param dpdk_dev
1290 : : * Backing DPDK device.
1291 : : * @param spawn
1292 : : * Verbs device parameters (name, port, switch_info) to spawn.
1293 : : * @param eth_da
1294 : : * Device arguments.
1295 : : * @param mkvlist
1296 : : * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
1297 : : *
1298 : : * @return
1299 : : * A valid Ethernet device object on success, NULL otherwise and rte_errno
1300 : : * is set. The following errors are defined:
1301 : : *
1302 : : * EBUSY: device is not supposed to be spawned.
1303 : : * EEXIST: device is already spawned
1304 : : */
1305 : : static struct rte_eth_dev *
1306 : 0 : mlx5_dev_spawn(struct rte_device *dpdk_dev,
1307 : : struct mlx5_dev_spawn_data *spawn,
1308 : : struct rte_eth_devargs *eth_da,
1309 : : struct mlx5_kvargs_ctrl *mkvlist)
1310 : : {
1311 : 0 : const struct mlx5_switch_info *switch_info = &spawn->info;
1312 : : struct mlx5_dev_ctx_shared *sh = NULL;
1313 : 0 : struct ibv_port_attr port_attr = { .state = IBV_PORT_NOP };
1314 : : struct rte_eth_dev *eth_dev = NULL;
1315 : : struct mlx5_priv *priv = NULL;
1316 : : int err = 0;
1317 : : struct rte_ether_addr mac;
1318 : : char name[RTE_ETH_NAME_MAX_LEN];
1319 : : int own_domain_id = 0;
1320 : : uint16_t port_id;
1321 : 0 : struct mlx5_port_info vport_info = { .query_flags = 0 };
1322 : : int nl_rdma;
1323 : : int i;
1324 : : struct mlx5_indexed_pool_config icfg[RTE_DIM(default_icfg)];
1325 : :
1326 : : memcpy(icfg, default_icfg, sizeof(icfg));
1327 : : /* Determine if this port representor is supposed to be spawned. */
1328 [ # # # # : 0 : if (switch_info->representor && dpdk_dev->devargs &&
# # ]
1329 : 0 : !mlx5_representor_match(spawn, eth_da))
1330 : : return NULL;
1331 : : /* Build device name. */
1332 : 0 : err = build_port_name(dpdk_dev, spawn, name, sizeof(name));
1333 [ # # ]: 0 : if (err < 0) {
1334 : 0 : DRV_LOG(ERR, "Failed to build port name for IB device %s/%u",
1335 : : spawn->phys_dev_name, spawn->phys_port);
1336 : 0 : rte_errno = EINVAL;
1337 : 0 : return NULL;
1338 : : }
1339 [ # # ]: 0 : if (err >= (int)sizeof(name))
1340 : 0 : DRV_LOG(WARNING, "device name overflow %s", name);
1341 : : /* check if the device is already spawned */
1342 [ # # ]: 0 : if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) {
1343 : : /*
1344 : : * When device is already spawned, its devargs should be set
1345 : : * as used. otherwise, mlx5_kvargs_validate() will fail.
1346 : : */
1347 [ # # ]: 0 : if (mkvlist)
1348 : 0 : mlx5_port_args_set_used(name, port_id, mkvlist);
1349 : 0 : rte_errno = EEXIST;
1350 : 0 : return NULL;
1351 : : }
1352 : 0 : DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
1353 [ # # ]: 0 : if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1354 : : struct mlx5_mp_id mp_id;
1355 : : int fd;
1356 : :
1357 : 0 : eth_dev = rte_eth_dev_attach_secondary(name);
1358 [ # # ]: 0 : if (eth_dev == NULL) {
1359 : 0 : DRV_LOG(ERR, "can not attach rte ethdev");
1360 : 0 : rte_errno = ENOMEM;
1361 : 0 : return NULL;
1362 : : }
1363 : 0 : eth_dev->device = dpdk_dev;
1364 : 0 : eth_dev->dev_ops = &mlx5_dev_sec_ops;
1365 : 0 : eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1366 : 0 : eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1367 : 0 : err = mlx5_proc_priv_init(eth_dev);
1368 [ # # ]: 0 : if (err)
1369 : : return NULL;
1370 : 0 : mlx5_mp_id_init(&mp_id, eth_dev->data->port_id);
1371 : : /* Receive command fd from primary process */
1372 : 0 : fd = mlx5_mp_req_verbs_cmd_fd(&mp_id);
1373 [ # # ]: 0 : if (fd < 0)
1374 : 0 : goto err_secondary;
1375 : : /* Remap UAR for Tx queues. */
1376 : 0 : err = mlx5_tx_uar_init_secondary(eth_dev, fd);
1377 : 0 : close(fd);
1378 [ # # ]: 0 : if (err)
1379 : 0 : goto err_secondary;
1380 : : /*
1381 : : * Ethdev pointer is still required as input since
1382 : : * the primary device is not accessible from the
1383 : : * secondary process.
1384 : : */
1385 : 0 : eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
1386 : 0 : eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
1387 : 0 : return eth_dev;
1388 : 0 : err_secondary:
1389 : 0 : mlx5_dev_close(eth_dev);
1390 : 0 : return NULL;
1391 : : }
1392 : 0 : sh = mlx5_alloc_shared_dev_ctx(spawn, mkvlist);
1393 [ # # ]: 0 : if (!sh)
1394 : : return NULL;
1395 : 0 : nl_rdma = mlx5_nl_init(NETLINK_RDMA, 0);
1396 : : /* Check port status. */
1397 [ # # ]: 0 : if (spawn->phys_port <= UINT8_MAX) {
1398 : : /* Legacy Verbs api only support u8 port number. */
1399 : 0 : err = mlx5_glue->query_port(sh->cdev->ctx, spawn->phys_port,
1400 : : &port_attr);
1401 [ # # ]: 0 : if (err) {
1402 : 0 : DRV_LOG(ERR, "port query failed: %s", strerror(err));
1403 : 0 : goto error;
1404 : : }
1405 [ # # ]: 0 : if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
1406 : 0 : DRV_LOG(ERR, "port is not configured in Ethernet mode");
1407 : : err = EINVAL;
1408 : 0 : goto error;
1409 : : }
1410 [ # # ]: 0 : } else if (nl_rdma >= 0) {
1411 : : /* IB doesn't allow more than 255 ports, must be Ethernet. */
1412 : 0 : err = mlx5_nl_port_state(nl_rdma,
1413 : : spawn->phys_dev_name,
1414 : 0 : spawn->phys_port, &spawn->cdev->dev_info);
1415 [ # # ]: 0 : if (err < 0) {
1416 : 0 : DRV_LOG(INFO, "Failed to get netlink port state: %s",
1417 : : strerror(rte_errno));
1418 : 0 : err = -rte_errno;
1419 : 0 : goto error;
1420 : : }
1421 : 0 : port_attr.state = (enum ibv_port_state)err;
1422 : : }
1423 [ # # ]: 0 : if (port_attr.state != IBV_PORT_ACTIVE)
1424 : 0 : DRV_LOG(INFO, "port is not active: \"%s\" (%d)",
1425 : : mlx5_glue->port_state_str(port_attr.state),
1426 : : port_attr.state);
1427 : : /* Allocate private eth device data. */
1428 : 0 : priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1429 : : sizeof(*priv),
1430 : : RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1431 [ # # ]: 0 : if (priv == NULL) {
1432 : 0 : DRV_LOG(ERR, "priv allocation failure");
1433 : : err = ENOMEM;
1434 : 0 : goto error;
1435 : : }
1436 : : /*
1437 : : * When user configures remote PD and CTX and device creates RxQ by
1438 : : * DevX, external RxQ is both supported and requested.
1439 : : */
1440 [ # # # # : 0 : if (mlx5_imported_pd_and_ctx(sh->cdev) && mlx5_devx_obj_ops_en(sh)) {
# # ]
1441 : 0 : priv->ext_rxqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1442 : : sizeof(struct mlx5_external_q) *
1443 : : MLX5_MAX_EXT_RX_QUEUES, 0,
1444 : : SOCKET_ID_ANY);
1445 [ # # ]: 0 : if (priv->ext_rxqs == NULL) {
1446 : 0 : DRV_LOG(ERR, "Fail to allocate external RxQ array.");
1447 : : err = ENOMEM;
1448 : 0 : goto error;
1449 : : }
1450 : 0 : priv->ext_txqs = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
1451 : : sizeof(struct mlx5_external_q) *
1452 : : MLX5_MAX_EXT_TX_QUEUES, 0,
1453 : : SOCKET_ID_ANY);
1454 [ # # ]: 0 : if (priv->ext_txqs == NULL) {
1455 : 0 : DRV_LOG(ERR, "Fail to allocate external TxQ array.");
1456 : : err = ENOMEM;
1457 : 0 : goto error;
1458 : : }
1459 : 0 : DRV_LOG(DEBUG, "External queue is supported.");
1460 : : }
1461 : 0 : priv->sh = sh;
1462 : 0 : priv->dev_port = spawn->phys_port;
1463 : 0 : priv->pci_dev = spawn->pci_dev;
1464 : : /* Some internal functions rely on Netlink sockets, open them now. */
1465 : 0 : priv->nl_socket_rdma = nl_rdma;
1466 : 0 : priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE, 0);
1467 : 0 : priv->representor = !!switch_info->representor;
1468 : 0 : priv->master = !!switch_info->master;
1469 : 0 : priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
1470 : 0 : priv->vport_meta_tag = 0;
1471 : 0 : priv->vport_meta_mask = 0;
1472 : 0 : priv->pf_bond = spawn->pf_bond;
1473 : 0 : priv->mpesw_port = spawn->mpesw_port;
1474 : 0 : priv->mpesw_uplink = false;
1475 : 0 : priv->mpesw_owner = spawn->info.mpesw_owner;
1476 [ # # ]: 0 : if (mlx5_is_port_on_mpesw_device(priv))
1477 : 0 : priv->mpesw_uplink = (spawn->info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK);
1478 : :
1479 [ # # ]: 0 : DRV_LOG(DEBUG,
1480 : : "dev_port=%u bus=%s pci=%s master=%d representor=%d pf_bond=%d "
1481 : : "mpesw_port=%d mpesw_uplink=%d",
1482 : : priv->dev_port, dpdk_dev->bus->name,
1483 : : priv->pci_dev ? priv->pci_dev->name : "NONE",
1484 : : priv->master, priv->representor, priv->pf_bond,
1485 : : priv->mpesw_port, priv->mpesw_uplink);
1486 : :
1487 [ # # # # ]: 0 : if (mlx5_is_port_on_mpesw_device(priv) && priv->sh->config.dv_flow_en != 2) {
1488 : 0 : DRV_LOG(ERR, "MPESW device is supported only with HWS");
1489 : : err = ENOTSUP;
1490 : 0 : goto error;
1491 : : }
1492 : : /*
1493 : : * If we have E-Switch we should determine the vport attributes.
1494 : : * E-Switch may use either source vport field or reg_c[0] metadata
1495 : : * register to match on vport index. The engaged part of metadata
1496 : : * register is defined by mask.
1497 : : */
1498 [ # # ]: 0 : if (sh->esw_mode) {
1499 : 0 : err = mlx5_glue->devx_port_query(sh->cdev->ctx,
1500 : : spawn->phys_port,
1501 : : &vport_info);
1502 [ # # ]: 0 : if (err) {
1503 : 0 : DRV_LOG(WARNING,
1504 : : "Cannot query devx port %d on device %s",
1505 : : spawn->phys_port, spawn->phys_dev_name);
1506 : 0 : vport_info.query_flags = 0;
1507 : : }
1508 : : }
1509 [ # # ]: 0 : if (vport_info.query_flags & MLX5_PORT_QUERY_REG_C0) {
1510 : 0 : priv->vport_meta_tag = vport_info.vport_meta_tag;
1511 : 0 : priv->vport_meta_mask = vport_info.vport_meta_mask;
1512 [ # # ]: 0 : if (!priv->vport_meta_mask) {
1513 : 0 : DRV_LOG(ERR,
1514 : : "vport zero mask for port %d on bonding device %s",
1515 : : spawn->phys_port, spawn->phys_dev_name);
1516 : : err = ENOTSUP;
1517 : 0 : goto error;
1518 : : }
1519 [ # # ]: 0 : if (priv->vport_meta_tag & ~priv->vport_meta_mask) {
1520 : 0 : DRV_LOG(ERR,
1521 : : "Invalid vport tag for port %d on bonding device %s",
1522 : : spawn->phys_port, spawn->phys_dev_name);
1523 : : err = ENOTSUP;
1524 : 0 : goto error;
1525 : : }
1526 : : }
1527 [ # # ]: 0 : if (vport_info.query_flags & MLX5_PORT_QUERY_VPORT) {
1528 : 0 : priv->vport_id = vport_info.vport_id;
1529 [ # # # # ]: 0 : } else if (spawn->pf_bond >= 0 && sh->esw_mode) {
1530 : 0 : DRV_LOG(ERR,
1531 : : "Cannot deduce vport index for port %d on bonding device %s",
1532 : : spawn->phys_port, spawn->phys_dev_name);
1533 : : err = ENOTSUP;
1534 : 0 : goto error;
1535 : : } else {
1536 : : /*
1537 : : * Suppose vport index in compatible way. Kernel/rdma_core
1538 : : * support single E-Switch per PF configurations only and
1539 : : * vport_id field contains the vport index for associated VF,
1540 : : * which is deduced from representor port name.
1541 : : * For example, let's have the IB device port 10, it has
1542 : : * attached network device eth0, which has port name attribute
1543 : : * pf0vf2, we can deduce the VF number as 2, and set vport index
1544 : : * as 3 (2+1). This assigning schema should be changed if the
1545 : : * multiple E-Switch instances per PF configurations or/and PCI
1546 : : * subfunctions are added.
1547 : : */
1548 [ # # ]: 0 : priv->vport_id = switch_info->representor ?
1549 : 0 : switch_info->port_name + 1 : -1;
1550 : : }
1551 : 0 : priv->representor_id = mlx5_representor_id_encode(switch_info,
1552 : : eth_da->type);
1553 : : /*
1554 : : * Look for sibling devices in order to reuse their switch domain
1555 : : * if any, otherwise allocate one.
1556 : : */
1557 [ # # ]: 0 : MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
1558 : 0 : const struct mlx5_priv *opriv =
1559 : 0 : rte_eth_devices[port_id].data->dev_private;
1560 : :
1561 [ # # ]: 0 : if (!opriv ||
1562 [ # # ]: 0 : opriv->sh != priv->sh ||
1563 [ # # ]: 0 : opriv->domain_id ==
1564 : : RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
1565 : : continue;
1566 : 0 : priv->domain_id = opriv->domain_id;
1567 : 0 : DRV_LOG(DEBUG, "dev_port-%u inherit domain_id=%u\n",
1568 : : priv->dev_port, priv->domain_id);
1569 : 0 : break;
1570 : : }
1571 [ # # ]: 0 : if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
1572 : 0 : err = rte_eth_switch_domain_alloc(&priv->domain_id);
1573 [ # # ]: 0 : if (err) {
1574 : 0 : err = rte_errno;
1575 : 0 : DRV_LOG(ERR, "unable to allocate switch domain: %s",
1576 : : strerror(rte_errno));
1577 : 0 : goto error;
1578 : : }
1579 : : own_domain_id = 1;
1580 : 0 : DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
1581 : : priv->dev_port, priv->domain_id);
1582 : : }
1583 [ # # ]: 0 : if (sh->cdev->config.devx) {
1584 : : struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
1585 : :
1586 : 0 : sh->steering_format_version = hca_attr->steering_format_version;
1587 : : #if defined(HAVE_MLX5_DR_CREATE_ACTION_ASO_EXT)
1588 [ # # # # ]: 0 : if (hca_attr->qos.sup && hca_attr->qos.flow_meter_old &&
1589 : : sh->config.dv_flow_en) {
1590 [ # # ]: 0 : if (sh->registers.aso_reg != REG_NON) {
1591 : 0 : priv->mtr_en = 1;
1592 : 0 : priv->mtr_reg_share = hca_attr->qos.flow_meter;
1593 : : }
1594 : : }
1595 [ # # ]: 0 : if (hca_attr->qos.sup && hca_attr->qos.flow_meter_aso_sup) {
1596 : : uint32_t log_obj_size =
1597 : : rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
1598 : 0 : if (log_obj_size >=
1599 [ # # ]: 0 : hca_attr->qos.log_meter_aso_granularity &&
1600 : : log_obj_size <=
1601 [ # # ]: 0 : hca_attr->qos.log_meter_aso_max_alloc)
1602 : 0 : sh->meter_aso_en = 1;
1603 : : }
1604 [ # # ]: 0 : if (priv->mtr_en) {
1605 : 0 : err = mlx5_aso_flow_mtrs_mng_init(priv->sh);
1606 [ # # ]: 0 : if (err) {
1607 : 0 : err = -err;
1608 : 0 : goto error;
1609 : : }
1610 : : }
1611 [ # # ]: 0 : if (hca_attr->flow.tunnel_header_0_1)
1612 : 0 : sh->tunnel_header_0_1 = 1;
1613 [ # # ]: 0 : if (hca_attr->flow.tunnel_header_2_3)
1614 : 0 : sh->tunnel_header_2_3 = 1;
1615 : : #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO_EXT */
1616 : : #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
1617 [ # # # # ]: 0 : if (hca_attr->flow_hit_aso && sh->registers.aso_reg == REG_C_3) {
1618 : 0 : sh->flow_hit_aso_en = 1;
1619 : 0 : err = mlx5_flow_aso_age_mng_init(sh);
1620 [ # # ]: 0 : if (err) {
1621 : 0 : err = -err;
1622 : 0 : goto error;
1623 : : }
1624 : 0 : DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
1625 : : }
1626 : : #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
1627 : : #if defined (HAVE_MLX5_DR_CREATE_ACTION_ASO) && \
1628 : : defined (HAVE_MLX5_DR_ACTION_ASO_CT)
1629 : : /* HWS create CT ASO SQ based on HWS configure queue number. */
1630 [ # # # # ]: 0 : if (sh->config.dv_flow_en != 2 &&
1631 [ # # ]: 0 : hca_attr->ct_offload && sh->registers.aso_reg == REG_C_3) {
1632 : 0 : err = mlx5_flow_aso_ct_mng_init(sh);
1633 [ # # ]: 0 : if (err) {
1634 : 0 : err = -err;
1635 : 0 : goto error;
1636 : : }
1637 : 0 : DRV_LOG(DEBUG, "CT ASO is supported.");
1638 : 0 : sh->ct_aso_en = 1;
1639 : : }
1640 : : #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO && HAVE_MLX5_DR_ACTION_ASO_CT */
1641 : : #if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
1642 [ # # # # ]: 0 : if (hca_attr->log_max_ft_sampler_num > 0 &&
1643 : : sh->config.dv_flow_en) {
1644 : 0 : priv->sampler_en = 1;
1645 : 0 : DRV_LOG(DEBUG, "Sampler enabled!");
1646 : : } else {
1647 : 0 : priv->sampler_en = 0;
1648 [ # # ]: 0 : if (!hca_attr->log_max_ft_sampler_num)
1649 : 0 : DRV_LOG(WARNING,
1650 : : "No available register for sampler.");
1651 : : else
1652 : 0 : DRV_LOG(DEBUG, "DV flow is not supported!");
1653 : : }
1654 : : #endif
1655 [ # # ]: 0 : if (hca_attr->lag_rx_port_affinity) {
1656 : 0 : sh->lag_rx_port_affinity_en = 1;
1657 : 0 : DRV_LOG(DEBUG, "LAG Rx Port Affinity enabled");
1658 : : }
1659 : 0 : priv->num_lag_ports = hca_attr->num_lag_ports;
1660 : 0 : DRV_LOG(DEBUG, "The number of lag ports is %d", priv->num_lag_ports);
1661 : : }
1662 : : /* Process parameters and store port configuration on priv structure. */
1663 : 0 : err = mlx5_port_args_config(priv, mkvlist, &priv->config);
1664 [ # # ]: 0 : if (err) {
1665 : 0 : err = rte_errno;
1666 : 0 : DRV_LOG(ERR, "Failed to process port configure: %s",
1667 : : strerror(rte_errno));
1668 : 0 : goto error;
1669 : : }
1670 : 0 : eth_dev = rte_eth_dev_allocate(name);
1671 [ # # ]: 0 : if (eth_dev == NULL) {
1672 : 0 : DRV_LOG(ERR, "can not allocate rte ethdev");
1673 : : err = ENOMEM;
1674 : 0 : goto error;
1675 : : }
1676 [ # # ]: 0 : if (priv->representor) {
1677 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
1678 : 0 : eth_dev->data->representor_id = priv->representor_id;
1679 [ # # ]: 0 : MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) {
1680 : 0 : struct mlx5_priv *opriv =
1681 : 0 : rte_eth_devices[port_id].data->dev_private;
1682 [ # # # # ]: 0 : if (opriv &&
1683 : 0 : opriv->master &&
1684 [ # # ]: 0 : opriv->domain_id == priv->domain_id &&
1685 [ # # ]: 0 : opriv->sh == priv->sh) {
1686 : 0 : eth_dev->data->backer_port_id = port_id;
1687 : 0 : break;
1688 : : }
1689 : : }
1690 [ # # ]: 0 : if (port_id >= RTE_MAX_ETHPORTS)
1691 : 0 : eth_dev->data->backer_port_id = eth_dev->data->port_id;
1692 : : }
1693 : 0 : priv->mp_id.port_id = eth_dev->data->port_id;
1694 : 0 : strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
1695 : : /*
1696 : : * Store associated network device interface index. This index
1697 : : * is permanent throughout the lifetime of device. So, we may store
1698 : : * the ifindex here and use the cached value further.
1699 : : */
1700 : : MLX5_ASSERT(spawn->ifindex);
1701 : 0 : priv->if_index = spawn->ifindex;
1702 : 0 : priv->lag_affinity_idx = sh->refcnt - 1;
1703 : 0 : eth_dev->data->dev_private = priv;
1704 : 0 : priv->dev_data = eth_dev->data;
1705 : 0 : eth_dev->data->mac_addrs = priv->mac;
1706 : 0 : eth_dev->device = dpdk_dev;
1707 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1708 : : /* Fetch minimum and maximum allowed MTU from the device. */
1709 : 0 : mlx5_get_mtu_bounds(eth_dev, &priv->min_mtu, &priv->max_mtu);
1710 : : /* Configure the first MAC address by default. */
1711 [ # # ]: 0 : if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
1712 : 0 : DRV_LOG(ERR,
1713 : : "port %u cannot get MAC address, is mlx5_en"
1714 : : " loaded? (errno: %s)",
1715 : : eth_dev->data->port_id, strerror(rte_errno));
1716 : : err = ENODEV;
1717 : 0 : goto error;
1718 : : }
1719 : 0 : DRV_LOG(INFO,
1720 : : "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT,
1721 : : eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac));
1722 : : #ifdef RTE_PMD_MLX5_DEBUG
1723 : : {
1724 : : char ifname[MLX5_NAMESIZE];
1725 : :
1726 : : if (mlx5_get_ifname(eth_dev, ifname) == 0)
1727 : : DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
1728 : : eth_dev->data->port_id, ifname);
1729 : : else
1730 : : DRV_LOG(DEBUG, "port %u ifname is unknown",
1731 : : eth_dev->data->port_id);
1732 : : }
1733 : : #endif
1734 : : /* Get actual MTU if possible. */
1735 : 0 : err = mlx5_get_mtu(eth_dev, ð_dev->data->mtu);
1736 [ # # ]: 0 : if (err) {
1737 : 0 : err = rte_errno;
1738 : 0 : goto error;
1739 : : }
1740 : 0 : DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
1741 : : eth_dev->data->mtu);
1742 : : /* Initialize burst functions to prevent crashes before link-up. */
1743 : 0 : eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
1744 : 0 : eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
1745 : 0 : eth_dev->dev_ops = &mlx5_dev_ops;
1746 : 0 : eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
1747 : 0 : eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
1748 : 0 : eth_dev->rx_queue_count = mlx5_rx_queue_count;
1749 : : /* Register MAC address. */
1750 : 0 : claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
1751 : : /* Sync mac addresses for PF or VF/SF if vf_nl_en is true */
1752 [ # # # # ]: 0 : if ((!sh->dev_cap.vf && !sh->dev_cap.sf) || sh->config.vf_nl_en)
1753 : 0 : mlx5_nl_mac_addr_sync(priv->nl_socket_route,
1754 : : mlx5_ifindex(eth_dev),
1755 : 0 : eth_dev->data->mac_addrs,
1756 : : MLX5_MAX_MAC_ADDRESSES);
1757 [ # # ]: 0 : priv->ctrl_flows = 0;
1758 : : rte_spinlock_init(&priv->flow_list_lock);
1759 : 0 : TAILQ_INIT(&priv->flow_meters);
1760 [ # # ]: 0 : if (priv->mtr_en) {
1761 : 0 : priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR);
1762 [ # # ]: 0 : if (!priv->mtr_profile_tbl)
1763 : 0 : goto error;
1764 : : }
1765 : : /* Bring Ethernet device up. */
1766 : 0 : DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
1767 : : eth_dev->data->port_id);
1768 : : /* Read link status in case it is up and there will be no event. */
1769 : 0 : mlx5_link_update(eth_dev, 0);
1770 : : /* Watch LSC interrupts between port probe and port start. */
1771 : 0 : priv->sh->port[priv->dev_port - 1].nl_ih_port_id = eth_dev->data->port_id;
1772 : 0 : mlx5_set_link_up(eth_dev);
1773 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
1774 : 0 : icfg[i].release_mem_en = !!sh->config.reclaim_mode;
1775 [ # # ]: 0 : if (sh->config.reclaim_mode)
1776 : 0 : icfg[i].per_core_cache = 0;
1777 : : #ifdef HAVE_MLX5_HWS_SUPPORT
1778 [ # # ]: 0 : if (priv->sh->config.dv_flow_en == 2) {
1779 : : icfg[i].size = sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws);
1780 : 0 : icfg[i].size += sizeof(struct rte_flow_hw_aux);
1781 : : }
1782 : : #endif
1783 : 0 : priv->flows[i] = mlx5_ipool_create(&icfg[i]);
1784 [ # # ]: 0 : if (!priv->flows[i])
1785 : 0 : goto error;
1786 : : }
1787 : : /* Create context for virtual machine VLAN workaround. */
1788 [ # # ]: 0 : priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
1789 [ # # ]: 0 : if (mlx5_devx_obj_ops_en(sh)) {
1790 : 0 : priv->obj_ops = mlx5_devx_obj_ops;
1791 : 0 : mlx5_queue_counter_id_prepare(eth_dev);
1792 : 0 : priv->obj_ops.lb_dummy_queue_create =
1793 : : mlx5_rxq_ibv_obj_dummy_lb_create;
1794 : 0 : priv->obj_ops.lb_dummy_queue_release =
1795 : : mlx5_rxq_ibv_obj_dummy_lb_release;
1796 [ # # ]: 0 : } else if (spawn->max_port > UINT8_MAX) {
1797 : : /* Verbs can't support ports larger than 255 by design. */
1798 : 0 : DRV_LOG(ERR, "must enable DV and ESW when RDMA link ports > 255");
1799 : : err = ENOTSUP;
1800 : 0 : goto error;
1801 : : } else {
1802 : 0 : priv->obj_ops = mlx5_ibv_obj_ops;
1803 : : }
1804 [ # # ]: 0 : if (sh->config.tx_pp &&
1805 [ # # ]: 0 : priv->obj_ops.txq_obj_new != mlx5_txq_devx_obj_new) {
1806 : : /*
1807 : : * HAVE_MLX5DV_DEVX_UAR_OFFSET is required to support
1808 : : * packet pacing and already checked above.
1809 : : * Hence, we should only make sure the SQs will be created
1810 : : * with DevX, not with Verbs.
1811 : : * Verbs allocates the SQ UAR on its own and it can't be shared
1812 : : * with Clock Queue UAR as required for Tx scheduling.
1813 : : */
1814 : 0 : DRV_LOG(ERR, "Verbs SQs, UAR can't be shared as required for packet pacing");
1815 : : err = ENODEV;
1816 : 0 : goto error;
1817 : : }
1818 : 0 : priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
1819 [ # # ]: 0 : if (!priv->drop_queue.hrxq)
1820 : 0 : goto error;
1821 : 0 : priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true,
1822 : : mlx5_hrxq_create_cb,
1823 : : mlx5_hrxq_match_cb,
1824 : : mlx5_hrxq_remove_cb,
1825 : : mlx5_hrxq_clone_cb,
1826 : : mlx5_hrxq_clone_free_cb);
1827 [ # # ]: 0 : if (!priv->hrxqs)
1828 : 0 : goto error;
1829 : 0 : mlx5_set_metadata_mask(eth_dev);
1830 [ # # ]: 0 : if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1831 [ # # ]: 0 : !priv->sh->dv_regc0_mask) {
1832 : 0 : DRV_LOG(ERR, "metadata mode %u is not supported "
1833 : : "(no metadata reg_c[0] is available)",
1834 : : sh->config.dv_xmeta_en);
1835 : : err = ENOTSUP;
1836 : 0 : goto error;
1837 : : }
1838 : : rte_rwlock_init(&priv->ind_tbls_lock);
1839 [ # # ]: 0 : if (sh->config.dv_flow_en) {
1840 : 0 : err = mlx5_alloc_shared_dr(eth_dev);
1841 [ # # ]: 0 : if (err)
1842 : 0 : goto error;
1843 [ # # ]: 0 : if (mlx5_flex_item_port_init(eth_dev) < 0)
1844 : 0 : goto error;
1845 : : }
1846 [ # # ]: 0 : if (sh->phdev->config.ipv6_tc_fallback == MLX5_IPV6_TC_UNKNOWN) {
1847 : 0 : sh->phdev->config.ipv6_tc_fallback = MLX5_IPV6_TC_OK;
1848 [ # # ]: 0 : if (!sh->cdev->config.hca_attr.modify_outer_ipv6_traffic_class ||
1849 [ # # # # ]: 0 : (sh->config.dv_flow_en == 1 && mlx5_flow_discover_ipv6_tc_support(eth_dev)))
1850 : 0 : sh->phdev->config.ipv6_tc_fallback = MLX5_IPV6_TC_FALLBACK;
1851 : : }
1852 : : rte_spinlock_init(&priv->hw_ctrl_lock);
1853 : 0 : LIST_INIT(&priv->hw_ctrl_flows);
1854 : 0 : LIST_INIT(&priv->hw_ext_ctrl_flows);
1855 [ # # ]: 0 : if (priv->sh->config.dv_flow_en == 2) {
1856 : : #ifdef HAVE_MLX5_HWS_SUPPORT
1857 : : /*
1858 : : * Unified FDB flag is only needed for the actions created on the transfer
1859 : : * port. proxy port. It is not needed on the following ports:
1860 : : * 1. NIC PF / VF / SF
1861 : : * 2. in Verbs or DV/DR mode
1862 : : * 3. with unsupported FW
1863 : : * 4. all representors in HWS
1864 : : */
1865 [ # # # # ]: 0 : priv->unified_fdb_en = !!priv->master && sh->cdev->config.hca_attr.fdb_unified_en;
1866 : : /* Jump FDB Rx works only with unified FDB enabled. */
1867 [ # # ]: 0 : if (priv->unified_fdb_en)
1868 : 0 : priv->jump_fdb_rx_en = sh->cdev->config.hca_attr.jump_fdb_rx_en;
1869 [ # # # # ]: 0 : DRV_LOG(DEBUG, "port %u: unified FDB %s enabled, jump_fdb_rx %s enabled.",
1870 : : eth_dev->data->port_id,
1871 : : priv->unified_fdb_en ? "is" : "isn't",
1872 : : priv->jump_fdb_rx_en ? "is" : "isn't");
1873 [ # # ]: 0 : if (priv->sh->config.dv_esw_en) {
1874 : : uint32_t usable_bits;
1875 : : uint32_t required_bits;
1876 : :
1877 [ # # ]: 0 : if (priv->sh->dv_regc0_mask == UINT32_MAX) {
1878 : 0 : DRV_LOG(ERR, "E-Switch port metadata is required when using HWS "
1879 : : "but it is disabled (configure it through devlink)");
1880 : : err = ENOTSUP;
1881 : 0 : goto error;
1882 : : }
1883 [ # # ]: 0 : if (priv->sh->dv_regc0_mask == 0) {
1884 : 0 : DRV_LOG(ERR, "E-Switch with HWS is not supported "
1885 : : "(no available bits in reg_c[0])");
1886 : : err = ENOTSUP;
1887 : 0 : goto error;
1888 : : }
1889 : : usable_bits = rte_popcount32(priv->sh->dv_regc0_mask);
1890 : 0 : required_bits = rte_popcount32(priv->vport_meta_mask);
1891 [ # # ]: 0 : if (usable_bits < required_bits) {
1892 : 0 : DRV_LOG(ERR, "Not enough bits available in reg_c[0] to provide "
1893 : : "representor matching.");
1894 : : err = ENOTSUP;
1895 : 0 : goto error;
1896 : : }
1897 : : }
1898 [ # # ]: 0 : if (priv->vport_meta_mask)
1899 : 0 : mlx5_flow_hw_set_port_info(eth_dev);
1900 [ # # ]: 0 : if (priv->sh->config.dv_esw_en &&
1901 [ # # # # ]: 0 : priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
1902 : : priv->sh->config.dv_xmeta_en != MLX5_XMETA_MODE_META32_HWS) {
1903 : 0 : DRV_LOG(ERR,
1904 : : "metadata mode %u is not supported in HWS eswitch mode",
1905 : : priv->sh->config.dv_xmeta_en);
1906 : : err = ENOTSUP;
1907 : 0 : goto error;
1908 : : }
1909 [ # # # # ]: 0 : if (priv->sh->config.dv_esw_en &&
1910 : 0 : mlx5_flow_hw_create_vport_action(eth_dev)) {
1911 : 0 : DRV_LOG(ERR, "port %u failed to create vport action",
1912 : : eth_dev->data->port_id);
1913 : : err = EINVAL;
1914 : 0 : goto error;
1915 : : }
1916 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
1917 : 0 : return eth_dev;
1918 : : #else
1919 : : DRV_LOG(ERR, "DV support is missing for HWS.");
1920 : : goto error;
1921 : : #endif
1922 : : }
1923 [ # # ]: 0 : if (!priv->sh->flow_priority_check_flag) {
1924 : : /* Supported Verbs flow priority number detection. */
1925 : 0 : err = mlx5_flow_discover_priorities(eth_dev);
1926 : 0 : priv->sh->flow_max_priority = err;
1927 : 0 : priv->sh->flow_priority_check_flag = 1;
1928 : : } else {
1929 : 0 : err = priv->sh->flow_max_priority;
1930 : : }
1931 [ # # ]: 0 : if (err < 0) {
1932 : 0 : err = -err;
1933 : 0 : goto error;
1934 : : }
1935 : : rte_spinlock_init(&priv->shared_act_sl);
1936 : 0 : mlx5_flow_counter_mode_config(eth_dev);
1937 : 0 : mlx5_flow_drop_action_config(eth_dev);
1938 [ # # ]: 0 : if (sh->config.dv_flow_en)
1939 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
1940 : : return eth_dev;
1941 : 0 : error:
1942 [ # # ]: 0 : if (priv) {
1943 : 0 : priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
1944 : : RTE_MAX_ETHPORTS;
1945 : 0 : rte_io_wmb();
1946 : : #ifdef HAVE_MLX5_HWS_SUPPORT
1947 [ # # ]: 0 : if (eth_dev &&
1948 [ # # ]: 0 : priv->sh &&
1949 [ # # ]: 0 : priv->sh->config.dv_flow_en == 2 &&
1950 : : priv->sh->config.dv_esw_en)
1951 : 0 : mlx5_flow_hw_destroy_vport_action(eth_dev);
1952 : : #endif
1953 [ # # ]: 0 : if (priv->sh)
1954 : 0 : mlx5_os_free_shared_dr(priv);
1955 [ # # ]: 0 : if (priv->nl_socket_route >= 0)
1956 : 0 : close(priv->nl_socket_route);
1957 [ # # ]: 0 : if (priv->vmwa_context)
1958 : 0 : mlx5_vlan_vmwa_exit(priv->vmwa_context);
1959 [ # # ]: 0 : for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
1960 [ # # ]: 0 : if (!priv->flows[i])
1961 : 0 : continue;
1962 : 0 : mlx5_ipool_destroy(priv->flows[i]);
1963 : : }
1964 [ # # # # ]: 0 : if (eth_dev && priv->drop_queue.hrxq)
1965 : 0 : mlx5_drop_action_destroy(eth_dev);
1966 [ # # ]: 0 : if (priv->mtr_profile_tbl)
1967 : 0 : mlx5_l3t_destroy(priv->mtr_profile_tbl);
1968 [ # # ]: 0 : if (own_domain_id)
1969 : 0 : claim_zero(rte_eth_switch_domain_free(priv->domain_id));
1970 [ # # ]: 0 : if (priv->hrxqs)
1971 : 0 : mlx5_list_destroy(priv->hrxqs);
1972 [ # # # # ]: 0 : if (eth_dev && priv->flex_item_map)
1973 : 0 : mlx5_flex_item_port_cleanup(eth_dev);
1974 : 0 : mlx5_free(priv->ext_rxqs);
1975 : 0 : mlx5_free(priv->ext_txqs);
1976 : 0 : mlx5_free(priv);
1977 [ # # ]: 0 : if (eth_dev != NULL)
1978 : 0 : eth_dev->data->dev_private = NULL;
1979 : : }
1980 [ # # ]: 0 : if (eth_dev != NULL) {
1981 : : /* mac_addrs must not be freed alone because part of
1982 : : * dev_private
1983 : : **/
1984 : 0 : eth_dev->data->mac_addrs = NULL;
1985 : 0 : rte_eth_dev_release_port(eth_dev);
1986 : : }
1987 : : if (sh)
1988 : 0 : mlx5_free_shared_dev_ctx(sh);
1989 [ # # ]: 0 : if (nl_rdma >= 0)
1990 : 0 : close(nl_rdma);
1991 : : MLX5_ASSERT(err > 0);
1992 : 0 : rte_errno = err;
1993 : 0 : return NULL;
1994 : : }
1995 : :
1996 : : /**
1997 : : * Comparison callback to sort device data.
1998 : : *
1999 : : * This is meant to be used with qsort().
2000 : : *
2001 : : * @param a[in]
2002 : : * Pointer to pointer to first data object.
2003 : : * @param b[in]
2004 : : * Pointer to pointer to second data object.
2005 : : *
2006 : : * @return
2007 : : * 0 if both objects are equal, less than 0 if the first argument is less
2008 : : * than the second, greater than 0 otherwise.
2009 : : */
2010 : : static int
2011 : 0 : mlx5_dev_spawn_data_cmp(const void *a, const void *b)
2012 : : {
2013 : : const struct mlx5_switch_info *si_a =
2014 : : &((const struct mlx5_dev_spawn_data *)a)->info;
2015 : : const struct mlx5_switch_info *si_b =
2016 : : &((const struct mlx5_dev_spawn_data *)b)->info;
2017 : 0 : int uplink_a = si_a->name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
2018 : 0 : int uplink_b = si_b->name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK;
2019 : : int ret;
2020 : :
2021 : : /* Uplink ports first. */
2022 : 0 : ret = uplink_b - uplink_a;
2023 [ # # ]: 0 : if (ret)
2024 : : return ret;
2025 : : /* Then master devices. */
2026 : 0 : ret = si_b->master - si_a->master;
2027 [ # # ]: 0 : if (ret)
2028 : : return ret;
2029 : : /* Then representor devices. */
2030 : 0 : ret = si_b->representor - si_a->representor;
2031 [ # # ]: 0 : if (ret)
2032 : : return ret;
2033 : : /* Unidentified devices come last in no specific order. */
2034 [ # # ]: 0 : if (!si_a->representor)
2035 : : return 0;
2036 : : /* Order representors by name. */
2037 : 0 : return si_a->port_name - si_b->port_name;
2038 : : }
2039 : :
2040 : : /**
2041 : : * Match PCI information for possible slaves of bonding device.
2042 : : *
2043 : : * @param[in] ibdev
2044 : : * Pointer to IB device.
2045 : : * @param[in] pci_dev
2046 : : * Pointer to primary PCI address structure to match.
2047 : : * @param[in] nl_rdma
2048 : : * Netlink RDMA group socket handle.
2049 : : * @param[in] owner
2050 : : * Representor owner PF index.
2051 : : * @param[in] dev_info
2052 : : * Cached mlx5 device information.
2053 : : * @param[out] bond_info
2054 : : * Pointer to bonding information.
2055 : : *
2056 : : * @return
2057 : : * negative value if no bonding device found, otherwise
2058 : : * non-negative index of the slave PF in bonding.
2059 : : */
static int
mlx5_device_bond_pci_match(const struct ibv_device *ibdev,
			   const struct rte_pci_addr *pci_dev,
			   int nl_rdma, uint16_t owner,
			   struct mlx5_dev_info *dev_info,
			   struct mlx5_bond_info *bond_info)
{
	char ifname[IF_NAMESIZE + 1];
	unsigned int ifindex;
	unsigned int np, i;
	FILE *bond_file = NULL, *file;
	int pf = -1; /* Slave PF index found so far, -1 while unmatched. */
	int ret;
	uint8_t cur_guid[32] = {0};
	uint8_t guid[32] = {0};

	/*
	 * Try to get master device name. If something goes wrong suppose
	 * the lack of kernel support and no bonding devices.
	 */
	memset(bond_info, 0, sizeof(*bond_info));
	if (nl_rdma < 0)
		return -1;
	if (!mlx5_os_is_device_bond(ibdev))
		return -1;
	np = mlx5_nl_portnum(nl_rdma, ibdev->name, dev_info);
	if (!np)
		return -1;
	/* GUID of the probed device, used for socket-direct identification. */
	if (mlx5_get_device_guid(pci_dev, cur_guid, sizeof(cur_guid)) < 0)
		return -1;
	/*
	 * The master device might not be on the predefined port(not on port
	 * index 1, it is not guaranteed), we have to scan all Infiniband
	 * device ports and find master.
	 */
	for (i = 1; i <= np; ++i) {
		/* Check whether Infiniband port is populated. */
		ifindex = mlx5_nl_ifindex(nl_rdma, ibdev->name, i, dev_info);
		if (!ifindex)
			continue;
		if (!if_indextoname(ifindex, ifname))
			continue;
		/* Try to read bonding slave names from sysfs. */
		MKSTR(slaves,
		      "/sys/class/net/%s/master/bonding/slaves", ifname);
		bond_file = fopen(slaves, "r");
		if (bond_file)
			break;
	}
	if (!bond_file)
		return -1;
	/* Use safe format to check maximal buffer length. */
	MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
	while (fscanf(bond_file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
		char tmp_str[IF_NAMESIZE + 32];
		struct rte_pci_addr pci_addr;
		struct mlx5_switch_info info;
		/* NOTE(review): shadows the outer 'ret'; harmless here. */
		int ret;

		/* Process slave interface names in the loop. */
		snprintf(tmp_str, sizeof(tmp_str),
			 "/sys/class/net/%s", ifname);
		if (mlx5_get_pci_addr(tmp_str, &pci_addr)) {
			DRV_LOG(WARNING,
				"Cannot get PCI address for netdev \"%s\".",
				ifname);
			continue;
		}
		/* Slave interface PCI address match found. */
		snprintf(tmp_str, sizeof(tmp_str),
			 "/sys/class/net/%s/phys_port_name", ifname);
		file = fopen(tmp_str, "rb");
		if (!file)
			break;
		info.name_type = MLX5_PHYS_PORT_NAME_TYPE_NOTSET;
		if (fscanf(file, "%32s", tmp_str) == 1) {
			mlx5_translate_port_name(tmp_str, &info);
			fclose(file);
		} else {
			fclose(file);
			break;
		}
		/* Only process PF ports. */
		if (info.name_type != MLX5_PHYS_PORT_NAME_TYPE_LEGACY &&
		    info.name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
			continue;
		/* Check max bonding member. */
		if (info.port_name >= MLX5_BOND_MAX_PORTS) {
			DRV_LOG(WARNING, "bonding index out of range, "
				"please increase MLX5_BOND_MAX_PORTS: %s",
				tmp_str);
			break;
		}
		/* Get ifindex. */
		snprintf(tmp_str, sizeof(tmp_str),
			 "/sys/class/net/%s/ifindex", ifname);
		file = fopen(tmp_str, "rb");
		if (!file)
			break;
		ret = fscanf(file, "%u", &ifindex);
		fclose(file);
		if (ret != 1)
			break;
		/* Save bonding info. */
		snprintf(bond_info->ports[info.port_name].ifname,
			 sizeof(bond_info->ports[0].ifname), "%s", ifname);
		bond_info->ports[info.port_name].pci_addr = pci_addr;
		bond_info->ports[info.port_name].ifindex = ifindex;
		bond_info->n_port++;
		/*
		 * Under socket direct mode, bonding will use
		 * system_image_guid as identification.
		 * After OFED 5.4, guid is readable (ret >= 0) under sysfs.
		 * All bonding members should have the same guid even if driver
		 * is using PCIe BDF.
		 */
		ret = mlx5_get_device_guid(&pci_addr, guid, sizeof(guid));
		if (ret < 0)
			break;
		else if (ret > 0) {
			/* GUID readable: match by GUID and owner PF index. */
			if (!memcmp(guid, cur_guid, sizeof(guid)) &&
			    owner == info.port_name &&
			    (owner != 0 || (owner == 0 &&
			    !rte_pci_addr_cmp(pci_dev, &pci_addr))))
				pf = info.port_name;
		} else if (pci_dev->domain == pci_addr.domain &&
		    pci_dev->bus == pci_addr.bus &&
		    pci_dev->devid == pci_addr.devid &&
		    ((pci_dev->function == 0 &&
		      pci_dev->function + owner == pci_addr.function) ||
		     (pci_dev->function == owner &&
		      pci_addr.function == owner)))
			/* GUID not readable: fall back to PCIe BDF matching. */
			pf = info.port_name;
	}
	fclose(bond_file);
	if (pf >= 0) {
		/* Get bond interface info */
		ret = mlx5_sysfs_bond_info(ifindex, &bond_info->ifindex,
					   bond_info->ifname);
		if (ret)
			DRV_LOG(ERR, "unable to get bond info: %s",
				strerror(rte_errno));
		else
			DRV_LOG(INFO, "PF device %u, bond device %u(%s)",
				ifindex, bond_info->ifindex, bond_info->ifname);
	}
	if (owner == 0 && pf != 0) {
		DRV_LOG(INFO, "PCIe instance " PCI_PRI_FMT " isn't bonding owner",
			pci_dev->domain, pci_dev->bus, pci_dev->devid,
			pci_dev->function);
	}
	return pf;
}
2213 : :
2214 : : static int
2215 : 0 : mlx5_nl_esw_multiport_get(struct rte_pci_addr *pci_addr, int *enabled)
2216 : : {
2217 : 0 : char pci_addr_str[PCI_PRI_STR_SIZE] = { 0 };
2218 : : int nlsk_fd;
2219 : : int devlink_id;
2220 : : int ret;
2221 : :
2222 : : /* Provide correct value to have defined enabled state in case of an error. */
2223 : 0 : *enabled = 0;
2224 : 0 : rte_pci_device_name(pci_addr, pci_addr_str, sizeof(pci_addr_str));
2225 : 0 : nlsk_fd = mlx5_nl_init(NETLINK_GENERIC, 0);
2226 [ # # ]: 0 : if (nlsk_fd < 0)
2227 : : return nlsk_fd;
2228 : 0 : devlink_id = mlx5_nl_devlink_family_id_get(nlsk_fd);
2229 [ # # ]: 0 : if (devlink_id < 0) {
2230 : : ret = devlink_id;
2231 : 0 : DRV_LOG(DEBUG, "Unable to get devlink family id for Multiport E-Switch checks "
2232 : : "by netlink, for PCI device %s", pci_addr_str);
2233 : 0 : goto close_nlsk_fd;
2234 : : }
2235 : 0 : ret = mlx5_nl_devlink_esw_multiport_get(nlsk_fd, devlink_id, pci_addr_str, enabled);
2236 [ # # ]: 0 : if (ret < 0)
2237 : 0 : DRV_LOG(DEBUG, "Unable to get Multiport E-Switch state by Netlink.");
2238 : 0 : close_nlsk_fd:
2239 : 0 : close(nlsk_fd);
2240 : 0 : return ret;
2241 : : }
2242 : :
2243 : : #define SYSFS_MPESW_PARAM_MAX_LEN 16
2244 : :
/*
 * Read the Multiport E-Switch (MPESW) state of an IB device via sysfs.
 *
 * Scans the netdevs of the IB device, finds the one bound to the given
 * PCI address and reads its devlink compat "lag_port_select_mode"
 * attribute; *enabled is set to 1 iff it reads "multiport_esw".
 *
 * Returns 0 on success (with *enabled valid), a negative errno value
 * otherwise (with *enabled forced to 0).
 */
static int
mlx5_sysfs_esw_multiport_get(struct ibv_device *ibv, struct rte_pci_addr *pci_addr, int *enabled,
			     struct mlx5_dev_info *dev_info)
{
	int nl_rdma;
	unsigned int n_ports;
	unsigned int i;
	int ret;

	/* Provide correct value to have defined enabled state in case of an error. */
	*enabled = 0;
	nl_rdma = mlx5_nl_init(NETLINK_RDMA, 0);
	if (nl_rdma < 0)
		return nl_rdma;
	n_ports = mlx5_nl_portnum(nl_rdma, ibv->name, dev_info);
	if (!n_ports) {
		ret = -rte_errno;
		goto close_nl_rdma;
	}
	for (i = 1; i <= n_ports; ++i) {
		unsigned int ifindex;
		char ifname[IF_NAMESIZE + 1];
		struct rte_pci_addr if_pci_addr = { 0 };
		char mpesw[SYSFS_MPESW_PARAM_MAX_LEN + 1];
		FILE *sysfs;
		int n;

		/* Find the netdev of this IB port, skip ports without one. */
		ifindex = mlx5_nl_ifindex(nl_rdma, ibv->name, i, dev_info);
		if (!ifindex)
			continue;
		if (!if_indextoname(ifindex, ifname))
			continue;
		MKSTR(sysfs_if_path, "/sys/class/net/%s", ifname);
		if (mlx5_get_pci_addr(sysfs_if_path, &if_pci_addr))
			continue;
		/* Only inspect the netdev bound to the requested PCI address. */
		if (pci_addr->domain != if_pci_addr.domain ||
		    pci_addr->bus != if_pci_addr.bus ||
		    pci_addr->devid != if_pci_addr.devid ||
		    pci_addr->function != if_pci_addr.function)
			continue;
		MKSTR(sysfs_mpesw_path,
		      "/sys/class/net/%s/compat/devlink/lag_port_select_mode", ifname);
		sysfs = fopen(sysfs_mpesw_path, "r");
		if (!sysfs)
			continue;
		n = fscanf(sysfs, "%" RTE_STR(SYSFS_MPESW_PARAM_MAX_LEN) "s", mpesw);
		fclose(sysfs);
		if (n != 1)
			continue;
		/* Attribute read succeeded - report success either way. */
		ret = 0;
		if (strcmp(mpesw, "multiport_esw") == 0) {
			*enabled = 1;
			break;
		}
		*enabled = 0;
		break;
	}
	/* Loop ran to completion without a readable match. */
	if (i > n_ports) {
		DRV_LOG(DEBUG, "Unable to get Multiport E-Switch state by sysfs.");
		rte_errno = ENOENT;
		ret = -rte_errno;
	}

close_nl_rdma:
	close(nl_rdma);
	return ret;
}
2312 : :
2313 : : static int
2314 : 0 : mlx5_is_mpesw_enabled(struct ibv_device *ibv, struct rte_pci_addr *ibv_pci_addr, int *enabled,
2315 : : struct mlx5_dev_info *dev_info)
2316 : : {
2317 : : /*
2318 : : * Try getting Multiport E-Switch state through netlink interface
2319 : : * If unable, try sysfs interface. If that is unable as well,
2320 : : * assume that Multiport E-Switch is disabled and return an error.
2321 : : */
2322 [ # # # # ]: 0 : if (mlx5_nl_esw_multiport_get(ibv_pci_addr, enabled) >= 0 ||
2323 : 0 : mlx5_sysfs_esw_multiport_get(ibv, ibv_pci_addr, enabled, dev_info) >= 0)
2324 : 0 : return 0;
2325 : 0 : DRV_LOG(DEBUG, "Unable to check MPESW state for IB device %s "
2326 : : "(PCI: " PCI_PRI_FMT ")",
2327 : : ibv->name,
2328 : : ibv_pci_addr->domain, ibv_pci_addr->bus,
2329 : : ibv_pci_addr->devid, ibv_pci_addr->function);
2330 : 0 : *enabled = 0;
2331 : 0 : return -rte_errno;
2332 : : }
2333 : :
/*
 * Check whether an IB device corresponds to the probed PCI device and,
 * when Multiport E-Switch (MPESW) is enabled on it, find the port index
 * of the MPESW owner (master) uplink port.
 *
 * Returns the owner uplink port index (>= 0) on success; -1 when the
 * device does not match, MPESW is disabled/undeterminable, or no
 * matching uplink port was found.
 */
static int
mlx5_device_mpesw_pci_match(struct ibv_device *ibv,
			    const struct rte_pci_addr *owner_pci,
			    int nl_rdma, struct mlx5_dev_info *dev_info)
{
	struct rte_pci_addr ibdev_pci_addr = { 0 };
	char ifname[IF_NAMESIZE + 1] = { 0 };
	unsigned int ifindex;
	unsigned int np;
	unsigned int i;
	int enabled = 0;
	int ret;

	/* Check if IB device's PCI address matches the probed PCI address. */
	if (mlx5_get_pci_addr(ibv->ibdev_path, &ibdev_pci_addr)) {
		DRV_LOG(DEBUG, "Skipping MPESW check for IB device %s since "
			"there is no underlying PCI device", ibv->name);
		rte_errno = ENOENT;
		return -rte_errno;
	}
	if (ibdev_pci_addr.domain != owner_pci->domain ||
	    ibdev_pci_addr.bus != owner_pci->bus ||
	    ibdev_pci_addr.devid != owner_pci->devid ||
	    ibdev_pci_addr.function != owner_pci->function) {
		return -1;
	}
	/* Check if IB device has MPESW enabled. */
	if (mlx5_is_mpesw_enabled(ibv, &ibdev_pci_addr, &enabled, dev_info))
		return -1;
	if (!enabled)
		return -1;
	/* Iterate through IB ports to find MPESW master uplink port. */
	if (nl_rdma < 0)
		return -1;
	np = mlx5_nl_portnum(nl_rdma, ibv->name, dev_info);
	if (!np)
		return -1;
	for (i = 1; i <= np; ++i) {
		struct rte_pci_addr pci_addr = { 0 };
		FILE *file;
		char port_name[IF_NAMESIZE + 1];
		struct mlx5_switch_info info;

		/* Check whether IB port has a corresponding netdev. */
		ifindex = mlx5_nl_ifindex(nl_rdma, ibv->name, i, dev_info);
		if (!ifindex)
			continue;
		if (!if_indextoname(ifindex, ifname))
			continue;
		/* Read port name and determine its type. */
		MKSTR(ifphysportname, "/sys/class/net/%s/phys_port_name", ifname);
		file = fopen(ifphysportname, "rb");
		if (!file)
			continue;
		ret = fscanf(file, "%16s", port_name);
		fclose(file);
		if (ret != 1)
			continue;
		memset(&info, 0, sizeof(info));
		mlx5_translate_port_name(port_name, &info);
		/* Only uplink ports can be the MPESW owner. */
		if (info.name_type != MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
			continue;
		/* Fetch PCI address of the device to which the netdev is bound. */
		MKSTR(ifpath, "/sys/class/net/%s", ifname);
		if (mlx5_get_pci_addr(ifpath, &pci_addr))
			continue;
		if (pci_addr.domain == ibdev_pci_addr.domain &&
		    pci_addr.bus == ibdev_pci_addr.bus &&
		    pci_addr.devid == ibdev_pci_addr.devid &&
		    pci_addr.function == ibdev_pci_addr.function) {
			MLX5_ASSERT(info.port_name >= 0);
			return info.port_name;
		}
	}
	/* No matching MPESW uplink port was found. */
	return -1;
}
2411 : :
2412 : : static void
2413 : 0 : calc_nb_uplinks_hpfs(struct ibv_device **ibv_match,
2414 : : unsigned int nd,
2415 : : struct mlx5_dev_spawn_data *list,
2416 : : unsigned int ns)
2417 : : {
2418 [ # # ]: 0 : for (unsigned int i = 0; i != nd; i++) {
2419 : : uint32_t nb_uplinks = 0;
2420 : : uint32_t nb_hpfs = 0;
2421 : : uint32_t j;
2422 : :
2423 [ # # ]: 0 : for (unsigned int j = 0; j != ns; j++) {
2424 [ # # ]: 0 : if (strcmp(ibv_match[i]->name, list[j].phys_dev_name) != 0)
2425 : 0 : continue;
2426 : :
2427 [ # # ]: 0 : if (list[j].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK)
2428 : 0 : nb_uplinks++;
2429 [ # # ]: 0 : else if (list[j].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_PFHPF)
2430 : 0 : nb_hpfs++;
2431 : : }
2432 : :
2433 [ # # ]: 0 : if (nb_uplinks > 0 || nb_hpfs > 0) {
2434 [ # # ]: 0 : for (j = 0; j != ns; j++) {
2435 [ # # ]: 0 : if (strcmp(ibv_match[i]->name, list[j].phys_dev_name) != 0)
2436 : 0 : continue;
2437 : :
2438 : 0 : list[j].nb_uplinks = nb_uplinks;
2439 : 0 : list[j].nb_hpfs = nb_hpfs;
2440 : : }
2441 : :
2442 : 0 : DRV_LOG(DEBUG, "IB device %s has %u uplinks, %u host PFs",
2443 : : ibv_match[i]->name,
2444 : : nb_uplinks,
2445 : : nb_hpfs);
2446 : : } else {
2447 : 0 : DRV_LOG(DEBUG, "IB device %s unable to recognize uplinks/host PFs",
2448 : : ibv_match[i]->name);
2449 : : }
2450 : : }
2451 : 0 : }
2452 : :
2453 : : /**
2454 : : * Register a PCI device within bonding.
2455 : : *
2456 : : * This function spawns Ethernet devices out of a given PCI device and
2457 : : * bonding owner PF index.
2458 : : *
2459 : : * @param[in] cdev
2460 : : * Pointer to common mlx5 device structure.
2461 : : * @param[in] req_eth_da
2462 : : * Requested ethdev device argument.
2463 : : * @param[in] owner_id
2464 : : * Requested owner PF port ID within bonding device, default to 0.
2465 : : * @param[in, out] mkvlist
2466 : : * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
2467 : : *
2468 : : * @return
2469 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
2470 : : */
static int
mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
		     struct rte_eth_devargs *req_eth_da,
		     uint16_t owner_id, struct mlx5_kvargs_ctrl *mkvlist)
{
	struct ibv_device **ibv_list;
	/*
	 * Number of found IB Devices matching with requested PCI BDF.
	 * nd != 1 means there are multiple IB devices over the same
	 * PCI device and we have representors and master.
	 */
	unsigned int nd = 0;
	/*
	 * Number of found IB device Ports. nd = 1 and np = 1..n means
	 * we have the single multiport IB device, and there may be
	 * representors attached to some of found ports.
	 */
	unsigned int np = 0;
	/*
	 * Number of DPDK ethernet devices to Spawn - either over
	 * multiple IB devices or multiple ports of single IB device.
	 * Actually this is the number of iterations to spawn.
	 */
	unsigned int ns = 0;
	/*
	 * Bonding device
	 *   < 0 - no bonding device (single one)
	 *  >= 0 - bonding device (value is slave PF index)
	 */
	int bd = -1;
	/*
	 * Multiport E-Switch (MPESW) device:
	 *   < 0 - no MPESW device or could not determine if it is MPESW device,
	 *  >= 0 - MPESW device. Value is the port index of the MPESW owner.
	 */
	int mpesw = MLX5_MPESW_PORT_INVALID;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
	struct mlx5_dev_spawn_data *list = NULL;
	struct rte_eth_devargs eth_da = *req_eth_da;
	struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
	struct mlx5_bond_info bond_info;
	int ret = -1;

	/*
	 * NOTE: "ret" is reused throughout this function - first it receives
	 * the device count from get_device_list(), then serves as the loop
	 * countdown index, and finally as the error/status code.
	 */
	errno = 0;
	ibv_list = mlx5_glue->get_device_list(&ret);
	if (!ibv_list) {
		rte_errno = errno ? errno : ENOSYS;
		DRV_LOG(ERR, "Cannot list devices, is ib_uverbs loaded?");
		return -rte_errno;
	}
	/*
	 * First scan the list of all Infiniband devices to find
	 * matching ones, gathering into the list.
	 */
	struct ibv_device *ibv_match[ret + 1];
	struct mlx5_dev_info *info, tmp_info[ret];
	int nl_route = mlx5_nl_init(NETLINK_ROUTE, 0);
	int nl_rdma = mlx5_nl_init(NETLINK_RDMA, 0);
	unsigned int i;

	memset(tmp_info, 0, sizeof(tmp_info));
	/* Walk the IB device list backwards; "ret" counts down to 0. */
	while (ret-- > 0) {
		struct rte_pci_addr pci_addr;

		/*
		 * With probe optimization and cached device info, only the
		 * cached IB device name is accepted; everything else is
		 * skipped early.
		 */
		if (cdev->config.probe_opt && cdev->dev_info.port_num) {
			if (strcmp(ibv_list[ret]->name, cdev->dev_info.ibname)) {
				DRV_LOG(INFO, "Unmatched caching device \"%s\" \"%s\"",
					cdev->dev_info.ibname, ibv_list[ret]->name);
				continue;
			}
			info = &cdev->dev_info;
		} else {
			info = &tmp_info[ret];
		}
		DRV_LOG(DEBUG, "Checking device \"%s\"", ibv_list[ret]->name);
		bd = mlx5_device_bond_pci_match(ibv_list[ret], &owner_pci,
						nl_rdma, owner_id,
						info,
						&bond_info);
		if (bd >= 0) {
			/*
			 * Bonding device detected. Only one match is allowed,
			 * the bonding is supported over multi-port IB device,
			 * there should be no matches on representor PCI
			 * functions or non VF LAG bonding devices with
			 * specified address.
			 */
			if (nd) {
				DRV_LOG(ERR,
					"multiple PCI match on bonding device"
					"\"%s\" found", ibv_list[ret]->name);
				rte_errno = ENOENT;
				ret = -rte_errno;
				goto exit;
			}
			/* Amend owner pci address if owner PF ID specified. */
			if (eth_da.nb_representor_ports)
				owner_pci.function += owner_id;
			DRV_LOG(INFO,
				"PCI information matches for slave %d bonding device \"%s\"",
				bd, ibv_list[ret]->name);
			ibv_match[nd++] = ibv_list[ret];
			break;
		}
		mpesw = mlx5_device_mpesw_pci_match(ibv_list[ret], &owner_pci, nl_rdma,
						    info);
		if (mpesw >= 0) {
			/*
			 * MPESW device detected. Only one matching IB device is allowed,
			 * so if any matches were found previously, fail gracefully.
			 */
			if (nd) {
				DRV_LOG(ERR,
					"PCI information matches MPESW device \"%s\", "
					"but multiple matching PCI devices were found. "
					"Probing failed.",
					ibv_list[ret]->name);
				rte_errno = ENOENT;
				ret = -rte_errno;
				goto exit;
			}
			DRV_LOG(INFO,
				"PCI information matches MPESW device \"%s\"",
				ibv_list[ret]->name);
			ibv_match[nd++] = ibv_list[ret];
			break;
		}
		/* Bonding or MPESW device was not found. */
		if (mlx5_get_pci_addr(ibv_list[ret]->ibdev_path,
				      &pci_addr)) {
			/* Non-matching entry: release its temp port info. */
			if (tmp_info[ret].port_info != NULL)
				mlx5_free(tmp_info[ret].port_info);
			memset(&tmp_info[ret], 0, sizeof(tmp_info[0]));
			continue;
		}
		if (rte_pci_addr_cmp(&owner_pci, &pci_addr) != 0) {
			if (tmp_info[ret].port_info != NULL)
				mlx5_free(tmp_info[ret].port_info);
			memset(&tmp_info[ret], 0, sizeof(tmp_info[0]));
			continue;
		}
		DRV_LOG(INFO, "PCI information matches for device \"%s\"",
			ibv_list[ret]->name);
		ibv_match[nd++] = ibv_list[ret];
	}
	/* NULL-terminate the matched device array. */
	ibv_match[nd] = NULL;
	if (!nd) {
		/* No device matches, just complain and bail out. */
		DRV_LOG(WARNING,
			"PF %u doesn't have Verbs device matches PCI device " PCI_PRI_FMT ","
			" are kernel drivers loaded?",
			owner_id, owner_pci.domain, owner_pci.bus,
			owner_pci.devid, owner_pci.function);
		rte_errno = ENOENT;
		ret = -rte_errno;
		goto exit;
	}
	if (nd == 1) {
		/* Adopt the first populated temp info as the cached one. */
		if (!cdev->dev_info.port_num) {
			for (i = 0; i < RTE_DIM(tmp_info); i++) {
				if (tmp_info[i].port_num) {
					cdev->dev_info = tmp_info[i];
					break;
				}
			}
		}
		/*
		 * Found single matching device may have multiple ports.
		 * Each port may be representor, we have to check the port
		 * number and check the representors existence.
		 */
		if (nl_rdma >= 0)
			np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name, &cdev->dev_info);
		if (!np)
			DRV_LOG(WARNING,
				"Cannot get IB device \"%s\" ports number.",
				ibv_match[0]->name);
		if (bd >= 0 && !np) {
			DRV_LOG(ERR, "Cannot get ports for bonding device.");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
		if (mpesw >= 0 && !np) {
			DRV_LOG(ERR, "Cannot get ports for MPESW device.");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
	} else {
		/* Can't handle one common device with multiple IB devices caching */
		for (i = 0; i < RTE_DIM(tmp_info); i++) {
			if (tmp_info[i].port_info != NULL)
				mlx5_free(tmp_info[i].port_info);
			memset(&tmp_info[i], 0, sizeof(tmp_info[0]));
		}
		DRV_LOG(INFO, "Cannot handle multiple IB devices info caching in single common device.");
	}
	/* Now we can determine the maximal amount of devices to be spawned. */
	list = mlx5_malloc(MLX5_MEM_ZERO,
			   sizeof(struct mlx5_dev_spawn_data) * (np ? np : nd),
			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!list) {
		DRV_LOG(ERR, "Spawn data array allocation failure.");
		rte_errno = ENOMEM;
		ret = -rte_errno;
		goto exit;
	}
	if (bd >= 0 || mpesw >= 0 || np > 1) {
		/*
		 * Single IB device with multiple ports found,
		 * it may be E-Switch master device and representors.
		 * We have to perform identification through the ports.
		 */
		MLX5_ASSERT(nl_rdma >= 0);
		MLX5_ASSERT(ns == 0);
		MLX5_ASSERT(nd == 1);
		MLX5_ASSERT(np);
		/* Port numbers are 1-based in the RDMA Netlink API. */
		for (i = 1; i <= np; ++i) {
			list[ns].bond_info = &bond_info;
			list[ns].max_port = np;
			list[ns].phys_port = i;
			list[ns].phys_dev_name = ibv_match[0]->name;
			list[ns].eth_dev = NULL;
			list[ns].pci_dev = pci_dev;
			list[ns].cdev = cdev;
			list[ns].pf_bond = bd;
			list[ns].mpesw_port = MLX5_MPESW_PORT_INVALID;
			list[ns].ifindex = mlx5_nl_ifindex(nl_rdma,
							   ibv_match[0]->name,
							   i, &cdev->dev_info);
			if (!list[ns].ifindex) {
				/*
				 * No network interface index found for the
				 * specified port, it means there is no
				 * representor on this port. It's OK,
				 * there can be disabled ports, for example
				 * if sriov_numvfs < sriov_totalvfs.
				 */
				continue;
			}
			ret = -1;
			if (nl_route >= 0)
				ret = mlx5_nl_switch_info(nl_route,
							  list[ns].ifindex,
							  &list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info(list[ns].ifindex,
							     &list[ns].info);
			}
			if (!ret && bd >= 0) {
				switch (list[ns].info.name_type) {
				case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
					if (np == 1) {
						/*
						 * Force standalone bonding
						 * device for ROCE LAG
						 * configurations.
						 */
						list[ns].info.master = 0;
						list[ns].info.representor = 0;
					}
					ns++;
					break;
				case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
					/* Fallthrough */
				case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
					/* Fallthrough */
				case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
					/* Keep only ports of the owner PF. */
					if (list[ns].info.pf_num == bd)
						ns++;
					break;
				default:
					break;
				}
				continue;
			}
			if (!ret && mpesw >= 0) {
				switch (list[ns].info.name_type) {
				case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
					/* Owner port is treated as master port. */
					if (list[ns].info.port_name == mpesw) {
						list[ns].info.master = 1;
						list[ns].info.representor = 0;
					} else {
						list[ns].info.master = 0;
						list[ns].info.representor = 1;
					}
					/*
					 * Ports of this type have uplink port index
					 * encoded in the name. This index is also a PF index.
					 */
					list[ns].info.pf_num = list[ns].info.port_name;
					list[ns].mpesw_port = list[ns].info.port_name;
					list[ns].info.mpesw_owner = mpesw;
					ns++;
					break;
				case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
				case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
				case MLX5_PHYS_PORT_NAME_TYPE_PFSF:
					/*
					 * Ports of this type have PF index encoded in name,
					 * which translate to the related uplink port index.
					 */
					list[ns].mpesw_port = list[ns].info.pf_num;
					/* MPESW owner is also saved but not used now. */
					list[ns].info.mpesw_owner = mpesw;
					ns++;
					break;
				default:
					break;
				}
				continue;
			}
			/* Plain case: accept exactly master XOR representor. */
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master))
				ns++;
		}
	} else {
		/*
		 * The existence of several matching entries (nd > 1) means
		 * port representors have been instantiated. No existing Verbs
		 * call nor sysfs entries can tell them apart, this can only
		 * be done through Netlink calls assuming kernel drivers are
		 * recent enough to support them.
		 *
		 * In the event of identification failure through Netlink,
		 * try again through sysfs, then:
		 *
		 * 1. A single IB device matches (nd == 1) with single
		 *    port (np=0/1) and is not a representor, assume
		 *    no switch support.
		 *
		 * 2. Otherwise no safe assumptions can be made;
		 *    complain louder and bail out.
		 */
		for (i = 0; i != nd; ++i) {
			memset(&list[ns].info, 0, sizeof(list[ns].info));
			list[ns].bond_info = NULL;
			list[ns].max_port = 1;
			list[ns].phys_port = 1;
			list[ns].phys_dev_name = ibv_match[i]->name;
			list[ns].eth_dev = NULL;
			list[ns].pci_dev = pci_dev;
			list[ns].cdev = cdev;
			list[ns].pf_bond = -1;
			list[ns].mpesw_port = MLX5_MPESW_PORT_INVALID;
			list[ns].ifindex = 0;
			if (nl_rdma >= 0)
				list[ns].ifindex = mlx5_nl_ifindex
						(nl_rdma,
						 ibv_match[i]->name,
						 1, &cdev->dev_info);
			if (!list[ns].ifindex) {
				char ifname[IF_NAMESIZE];

				/*
				 * Netlink failed, it may happen with old
				 * ib_core kernel driver (before 4.16).
				 * We can assume there is old driver because
				 * here we are processing single ports IB
				 * devices. Let's try sysfs to retrieve
				 * the ifindex. The method works for
				 * master device only.
				 */
				if (nd > 1) {
					/*
					 * Multiple devices found, assume
					 * representors, can not distinguish
					 * master/representor and retrieve
					 * ifindex via sysfs.
					 */
					continue;
				}
				ret = mlx5_get_ifname_sysfs
					(ibv_match[i]->ibdev_path, ifname);
				if (!ret)
					list[ns].ifindex =
						if_nametoindex(ifname);
				if (!list[ns].ifindex) {
					/*
					 * No network interface index found
					 * for the specified device, it means
					 * there it is neither representor
					 * nor master.
					 */
					continue;
				}
			}
			ret = -1;
			if (nl_route >= 0)
				ret = mlx5_nl_switch_info(nl_route,
							  list[ns].ifindex,
							  &list[ns].info);
			if (ret || (!list[ns].info.representor &&
				    !list[ns].info.master)) {
				/*
				 * We failed to recognize representors with
				 * Netlink, let's try to perform the task
				 * with sysfs.
				 */
				ret = mlx5_sysfs_switch_info(list[ns].ifindex,
							     &list[ns].info);
			}
			if (!ret && (list[ns].info.representor ^
				     list[ns].info.master)) {
				ns++;
			} else if ((nd == 1) &&
				   !list[ns].info.representor &&
				   !list[ns].info.master) {
				/*
				 * Single IB device with one physical port and
				 * attached network device.
				 * May be SRIOV is not enabled or there is no
				 * representors.
				 */
				DRV_LOG(INFO, "No E-Switch support detected.");
				ns++;
				break;
			}
		}
		if (!ns) {
			DRV_LOG(ERR,
				"Unable to recognize master/representors on the multiple IB devices.");
			rte_errno = ENOENT;
			ret = -rte_errno;
			goto exit;
		}
		/*
		 * New kernels may add the switch_id attribute for the case
		 * there is no E-Switch and we wrongly recognized the only
		 * device as master. Override this if there is the single
		 * device with single port and new device name format present.
		 */
		if (nd == 1 &&
		    list[0].info.name_type == MLX5_PHYS_PORT_NAME_TYPE_UPLINK) {
			list[0].info.master = 0;
			list[0].info.representor = 0;
		}
	}
	MLX5_ASSERT(ns);
	/* Calculate number of uplinks and host PFs for each matched IB device. */
	calc_nb_uplinks_hpfs(ibv_match, nd, list, ns);
	/*
	 * Sort list to probe devices in natural order for users convenience
	 * (i.e. master first, then representors from lowest to highest ID).
	 */
	qsort(list, ns, sizeof(*list), mlx5_dev_spawn_data_cmp);
	if (eth_da.type != RTE_ETH_REPRESENTOR_NONE) {
		/* Set devargs default values. */
		if (eth_da.nb_ports == 0 && ns > 0) {
			if (list[0].pf_bond >= 0 && list[0].info.representor)
				DRV_LOG(WARNING, "Representor on Bonding device should use pf#vf# syntax: %s",
					pci_dev->device.devargs->args);
			eth_da.nb_ports = 1;
			eth_da.ports[0] = list[0].info.port_name;
		}
		if (eth_da.nb_representor_ports == 0) {
			eth_da.nb_representor_ports = 1;
			eth_da.representor_ports[0] = 0;
		}
	}
	/* Spawn one ethdev per entry; i is inspected after the loop. */
	for (i = 0; i != ns; ++i) {
		uint32_t restore;

		list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i], &eth_da,
						 mkvlist);
		if (!list[i].eth_dev) {
			if (rte_errno != EBUSY && rte_errno != EEXIST)
				break;
			/* Device is disabled or already spawned. Ignore it. */
			continue;
		}
		restore = list[i].eth_dev->data->dev_flags;
		rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
		/**
		 * Each representor has a dedicated interrupts vector.
		 * rte_eth_copy_pci_info() assigns PF interrupts handle to
		 * representor eth_dev object because representor and PF
		 * share the same PCI address.
		 * Override representor device with a dedicated
		 * interrupts handle here.
		 * Representor interrupts handle is released in mlx5_dev_stop().
		 */
		if (list[i].info.representor) {
			struct rte_intr_handle *intr_handle =
				rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
			if (intr_handle == NULL) {
				DRV_LOG(ERR,
					"port %u failed to allocate memory for interrupt handler "
					"Rx interrupts will not be supported",
					i);
				rte_errno = ENOMEM;
				ret = -rte_errno;
				goto exit;
			}
			list[i].eth_dev->intr_handle = intr_handle;
		}
		/* Restore non-PCI flags cleared by the above call. */
		list[i].eth_dev->data->dev_flags |= restore;
		rte_eth_dev_probing_finish(list[i].eth_dev);
	}
	/* i != ns means the spawn loop broke out on a hard error. */
	if (i != ns) {
		DRV_LOG(ERR,
			"probe of PCI device " PCI_PRI_FMT " aborted after"
			" encountering an error: %s",
			owner_pci.domain, owner_pci.bus,
			owner_pci.devid, owner_pci.function,
			strerror(rte_errno));
		ret = -rte_errno;
		/* Roll back. */
		while (i--) {
			if (!list[i].eth_dev)
				continue;
			mlx5_dev_close(list[i].eth_dev);
			/* mac_addrs must not be freed because in dev_private */
			list[i].eth_dev->data->mac_addrs = NULL;
			claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
		}
		/* Restore original error. */
		rte_errno = -ret;
	} else {
		ret = 0;
	}
exit:
	/*
	 * Do the routine cleanup:
	 * - close opened Netlink sockets
	 * - free allocated spawn data array
	 * - free the Infiniband device list
	 */
	if (nl_rdma >= 0)
		close(nl_rdma);
	if (nl_route >= 0)
		close(nl_route);
	if (list)
		mlx5_free(list);
	MLX5_ASSERT(ibv_list);
	mlx5_glue->free_device_list(ibv_list);
	/* On failure, drop any cached device info so a later probe starts clean. */
	if (ret) {
		if (cdev->dev_info.port_info != NULL)
			mlx5_free(cdev->dev_info.port_info);
		memset(&cdev->dev_info, 0, sizeof(cdev->dev_info));
	}
	return ret;
}
3023 : :
3024 : : static int
3025 : 0 : mlx5_os_parse_eth_devargs(struct rte_device *dev,
3026 : : struct rte_eth_devargs *eth_da)
3027 : : {
3028 : : int ret = 0;
3029 : :
3030 [ # # ]: 0 : if (dev->devargs == NULL)
3031 : : return 0;
3032 : : memset(eth_da, 0, sizeof(*eth_da));
3033 : : /* Parse representor information first from class argument. */
3034 [ # # ]: 0 : if (dev->devargs->cls_str)
3035 : 0 : ret = rte_eth_devargs_parse(dev->devargs->cls_str, eth_da, 1);
3036 [ # # ]: 0 : if (ret < 0) {
3037 : 0 : DRV_LOG(ERR, "failed to parse device arguments: %s",
3038 : : dev->devargs->cls_str);
3039 : 0 : return -rte_errno;
3040 : : }
3041 [ # # # # ]: 0 : if (eth_da->type == RTE_ETH_REPRESENTOR_NONE && dev->devargs->args) {
3042 : : /* Parse legacy device argument */
3043 : 0 : ret = rte_eth_devargs_parse(dev->devargs->args, eth_da, 1);
3044 [ # # ]: 0 : if (ret < 0) {
3045 : 0 : DRV_LOG(ERR, "failed to parse device arguments: %s",
3046 : : dev->devargs->args);
3047 : 0 : return -rte_errno;
3048 : : }
3049 : : }
3050 : : return 0;
3051 : : }
3052 : :
3053 : : /**
3054 : : * Callback to register a PCI device.
3055 : : *
3056 : : * This function spawns Ethernet devices out of a given PCI device.
3057 : : *
3058 : : * @param[in] cdev
3059 : : * Pointer to common mlx5 device structure.
3060 : : * @param[in, out] mkvlist
3061 : : * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
3062 : : *
3063 : : * @return
3064 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
3065 : : */
3066 : : static int
3067 : 0 : mlx5_os_pci_probe(struct mlx5_common_device *cdev,
3068 : : struct mlx5_kvargs_ctrl *mkvlist)
3069 : : {
3070 : 0 : struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(cdev->dev);
3071 : 0 : struct rte_eth_devargs eth_da = { .nb_ports = 0 };
3072 : : int ret = 0;
3073 : : uint16_t p;
3074 : :
3075 : 0 : ret = mlx5_os_parse_eth_devargs(cdev->dev, ð_da);
3076 [ # # ]: 0 : if (ret != 0)
3077 : : return ret;
3078 : :
3079 [ # # ]: 0 : if (eth_da.nb_ports > 0) {
3080 : : /* Iterate all port if devargs pf is range: "pf[0-1]vf[...]". */
3081 [ # # ]: 0 : for (p = 0; p < eth_da.nb_ports; p++) {
3082 : 0 : ret = mlx5_os_pci_probe_pf(cdev, ð_da,
3083 : 0 : eth_da.ports[p], mkvlist);
3084 [ # # ]: 0 : if (ret) {
3085 : 0 : DRV_LOG(INFO, "Probe of PCI device " PCI_PRI_FMT " "
3086 : : "aborted due to proding failure of PF %u",
3087 : : pci_dev->addr.domain, pci_dev->addr.bus,
3088 : : pci_dev->addr.devid, pci_dev->addr.function,
3089 : : eth_da.ports[p]);
3090 : 0 : mlx5_net_remove(cdev);
3091 [ # # ]: 0 : if (p != 0)
3092 : : break;
3093 : : }
3094 : : }
3095 : : } else {
3096 : 0 : ret = mlx5_os_pci_probe_pf(cdev, ð_da, 0, mkvlist);
3097 : : }
3098 : : return ret;
3099 : : }
3100 : :
3101 : : /* Probe a single SF device on auxiliary bus, no representor support. */
3102 : : static int
3103 : 0 : mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev,
3104 : : struct mlx5_kvargs_ctrl *mkvlist)
3105 : : {
3106 : 0 : struct rte_eth_devargs eth_da = { .nb_ports = 0 };
3107 : 0 : struct mlx5_dev_spawn_data spawn = {
3108 : : .pf_bond = -1,
3109 : : .mpesw_port = MLX5_MPESW_PORT_INVALID,
3110 : : };
3111 : 0 : struct rte_device *dev = cdev->dev;
3112 : 0 : struct rte_auxiliary_device *adev = RTE_DEV_TO_AUXILIARY(dev);
3113 : : struct rte_eth_dev *eth_dev;
3114 : : int ret = 0;
3115 : :
3116 : : /* Parse ethdev devargs. */
3117 : 0 : ret = mlx5_os_parse_eth_devargs(dev, ð_da);
3118 [ # # ]: 0 : if (ret != 0)
3119 : : return ret;
3120 : : /* Init spawn data. */
3121 : 0 : spawn.max_port = 1;
3122 : 0 : spawn.phys_port = 1;
3123 [ # # ]: 0 : spawn.phys_dev_name = mlx5_os_get_ctx_device_name(cdev->ctx);
3124 : 0 : ret = mlx5_auxiliary_get_ifindex(dev->name);
3125 [ # # ]: 0 : if (ret < 0) {
3126 : 0 : DRV_LOG(ERR, "failed to get ethdev ifindex: %s", dev->name);
3127 : 0 : return ret;
3128 : : }
3129 : 0 : spawn.ifindex = ret;
3130 : 0 : spawn.cdev = cdev;
3131 : : /* Spawn device. */
3132 : 0 : eth_dev = mlx5_dev_spawn(dev, &spawn, ð_da, mkvlist);
3133 [ # # ]: 0 : if (eth_dev == NULL)
3134 : 0 : return -rte_errno;
3135 : : /* Post create. */
3136 : 0 : eth_dev->intr_handle = adev->intr_handle;
3137 [ # # ]: 0 : if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3138 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3139 : 0 : eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_RMV;
3140 : 0 : eth_dev->data->numa_node = dev->numa_node;
3141 : : }
3142 : 0 : rte_eth_dev_probing_finish(eth_dev);
3143 : 0 : return 0;
3144 : : }
3145 : :
3146 : : /**
3147 : : * Net class driver callback to probe a device.
3148 : : *
3149 : : * This function probe PCI bus device(s) or a single SF on auxiliary bus.
3150 : : *
3151 : : * @param[in] cdev
3152 : : * Pointer to the common mlx5 device.
3153 : : * @param[in, out] mkvlist
3154 : : * Pointer to mlx5 kvargs control, can be NULL if there is no devargs.
3155 : : *
3156 : : * @return
3157 : : * 0 on success, a negative errno value otherwise and rte_errno is set.
3158 : : */
3159 : : int
3160 : 0 : mlx5_os_net_probe(struct mlx5_common_device *cdev,
3161 : : struct mlx5_kvargs_ctrl *mkvlist)
3162 : : {
3163 : : int ret;
3164 : :
3165 [ # # ]: 0 : if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3166 : 0 : mlx5_pmd_socket_init();
3167 : 0 : ret = mlx5_init_once();
3168 [ # # ]: 0 : if (ret) {
3169 : 0 : DRV_LOG(ERR, "Unable to init PMD global data: %s",
3170 : : strerror(rte_errno));
3171 : 0 : return -rte_errno;
3172 : : }
3173 : 0 : ret = mlx5_probe_again_args_validate(cdev, mkvlist);
3174 [ # # ]: 0 : if (ret) {
3175 : 0 : DRV_LOG(ERR, "Probe again parameters are not compatible : %s",
3176 : : strerror(rte_errno));
3177 : 0 : return -rte_errno;
3178 : : }
3179 [ # # ]: 0 : if (mlx5_dev_is_pci(cdev->dev))
3180 : 0 : return mlx5_os_pci_probe(cdev, mkvlist);
3181 : : else
3182 : 0 : return mlx5_os_auxiliary_probe(cdev, mkvlist);
3183 : : }
3184 : :
3185 : : /**
3186 : : * Cleanup resources when the last device is closed.
3187 : : */
void
mlx5_os_net_cleanup(void)
{
	/* Tear down the PMD control socket created in mlx5_os_net_probe(). */
	mlx5_pmd_socket_uninit();
}
3193 : :
3194 : : /**
3195 : : * Install shared asynchronous device events handler.
3196 : : * This function is implemented to support event sharing
3197 : : * between multiple ports of single IB device.
3198 : : *
3199 : : * @param sh
3200 : : * Pointer to mlx5_dev_ctx_shared object.
3201 : : */
void
mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
{
	struct ibv_context *ctx = sh->cdev->ctx;
	int nlsk_fd;
	uint8_t rdma_monitor_supp = 0;

	/* Main async event handler on the Verbs async fd. */
	sh->intr_handle = mlx5_os_interrupt_handler_create
		(RTE_INTR_INSTANCE_F_SHARED, true,
		 ctx->async_fd, mlx5_dev_interrupt_handler, sh);
	if (!sh->intr_handle) {
		DRV_LOG(ERR, "Failed to allocate intr_handle.");
		return;
	}
	/*
	 * Optional RDMA Netlink monitor: only for multi-port devices with
	 * probe optimization enabled, and only if not already set up.
	 */
	if (sh->cdev->dev_info.probe_opt &&
	    sh->cdev->dev_info.port_num > 1 &&
	    !sh->rdma_monitor_supp) {
		nlsk_fd = mlx5_nl_rdma_monitor_init();
		if (nlsk_fd < 0) {
			DRV_LOG(ERR, "Failed to create a socket for RDMA Netlink events: %s",
				rte_strerror(rte_errno));
			return;
		}
		if (mlx5_nl_rdma_monitor_cap_get(nlsk_fd, &rdma_monitor_supp)) {
			DRV_LOG(ERR, "Failed to query RDMA monitor support: %s",
				rte_strerror(rte_errno));
			close(nlsk_fd);
			return;
		}
		sh->rdma_monitor_supp = rdma_monitor_supp;
		if (sh->rdma_monitor_supp) {
			/* Handler takes ownership of nlsk_fd on success. */
			sh->intr_handle_ib = mlx5_os_interrupt_handler_create
				(RTE_INTR_INSTANCE_F_SHARED, true,
				 nlsk_fd, mlx5_dev_interrupt_handler_ib, sh);
			if (sh->intr_handle_ib == NULL) {
				DRV_LOG(ERR, "Fail to allocate intr_handle");
				close(nlsk_fd);
				return;
			}
			sh->cdev->dev_info.async_mon_ready = 1;
		} else {
			/* No monitor support: give up probe optimization. */
			close(nlsk_fd);
			if (sh->cdev->dev_info.probe_opt) {
				DRV_LOG(INFO, "Failed to create rdma link monitor, disable probe optimization");
				sh->cdev->dev_info.probe_opt = 0;
				mlx5_free(sh->cdev->dev_info.port_info);
				sh->cdev->dev_info.port_info = NULL;
			}
		}
	}
	/* Netlink link-state (RTMGRP_LINK) event handler. */
	nlsk_fd = mlx5_nl_init(NETLINK_ROUTE, RTMGRP_LINK);
	if (nlsk_fd < 0) {
		DRV_LOG(ERR, "Failed to create a socket for Netlink events: %s",
			rte_strerror(rte_errno));
		return;
	}
	sh->intr_handle_nl = mlx5_os_interrupt_handler_create
		(RTE_INTR_INSTANCE_F_SHARED, true,
		 nlsk_fd, mlx5_dev_interrupt_handler_nl, sh);
	if (sh->intr_handle_nl == NULL) {
		DRV_LOG(ERR, "Fail to allocate intr_handle");
		return;
	}
	/* DevX async command completion handler (when DevX is in use). */
	if (sh->cdev->config.devx) {
#ifdef HAVE_IBV_DEVX_ASYNC
		struct mlx5dv_devx_cmd_comp *devx_comp;

		sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
		devx_comp = sh->devx_comp;
		if (!devx_comp) {
			DRV_LOG(INFO, "failed to allocate devx_comp.");
			return;
		}
		sh->intr_handle_devx = mlx5_os_interrupt_handler_create
			(RTE_INTR_INSTANCE_F_SHARED, true,
			 devx_comp->fd,
			 mlx5_dev_interrupt_handler_devx, sh);
		if (!sh->intr_handle_devx) {
			DRV_LOG(ERR, "Failed to allocate intr_handle.");
			return;
		}
#endif /* HAVE_IBV_DEVX_ASYNC */
	}
}
3286 : :
3287 : : /**
3288 : : * Uninstall shared asynchronous device events handler.
3289 : : * This function is implemented to support event sharing
3290 : : * between multiple ports of single IB device.
3291 : : *
3292 : : * @param dev
3293 : : * Pointer to mlx5_dev_ctx_shared object.
3294 : : */
void
mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
{
	int fd;

	mlx5_os_interrupt_handler_destroy(sh->intr_handle,
					  mlx5_dev_interrupt_handler, sh);
	/* Fetch the Netlink fd before the handle is destroyed, then close it. */
	fd = rte_intr_fd_get(sh->intr_handle_nl);
	mlx5_os_interrupt_handler_destroy(sh->intr_handle_nl,
					  mlx5_dev_interrupt_handler_nl, sh);
	if (fd >= 0)
		close(fd);
#ifdef HAVE_IBV_DEVX_ASYNC
	mlx5_os_interrupt_handler_destroy(sh->intr_handle_devx,
					  mlx5_dev_interrupt_handler_devx, sh);
	if (sh->devx_comp)
		mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
#endif
	/* Same pattern for the RDMA monitor socket installed at probe time. */
	fd = rte_intr_fd_get(sh->intr_handle_ib);
	mlx5_os_interrupt_handler_destroy(sh->intr_handle_ib,
					  mlx5_dev_interrupt_handler_ib, sh);
	if (fd >= 0)
		close(fd);
}
3319 : :
3320 : : /**
3321 : : * Read statistics by a named counter.
3322 : : *
3323 : : * @param[in] priv
3324 : : * Pointer to the private device data structure.
3325 : : * @param[in] ctr_name
3326 : : * Pointer to the name of the statistic counter to read
3327 : : * @param[out] stat
3328 : : * Pointer to read statistic value.
3329 : : * @return
 * 0 on success and stat is valid, 1 if failed to read the value
3331 : : * rte_errno is set.
3332 : : *
3333 : : */
3334 : : int
3335 : 0 : mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
3336 : : uint64_t *stat)
3337 : : {
3338 : : int fd;
3339 : :
3340 [ # # ]: 0 : if (priv->sh) {
3341 [ # # ]: 0 : if (priv->q_counters != NULL &&
3342 [ # # ]: 0 : strcmp(ctr_name, "out_of_buffer") == 0) {
3343 : 0 : return mlx5_read_queue_counter(priv->q_counters, ctr_name, stat);
3344 : : }
3345 [ # # ]: 0 : if (priv->q_counter_hairpin != NULL &&
3346 [ # # ]: 0 : strcmp(ctr_name, "hairpin_out_of_buffer") == 0) {
3347 : 0 : return mlx5_read_queue_counter(priv->q_counter_hairpin, ctr_name, stat);
3348 : : }
3349 : 0 : MKSTR(path, "%s/ports/%d/hw_counters/%s",
3350 : : priv->sh->ibdev_path,
3351 : : priv->dev_port,
3352 : : ctr_name);
3353 : : fd = open(path, O_RDONLY);
3354 : : /*
3355 : : * in switchdev the file location is not per port
3356 : : * but rather in <ibdev_path>/hw_counters/<file_name>.
3357 : : */
3358 [ # # ]: 0 : if (fd == -1) {
3359 : 0 : MKSTR(path1, "%s/hw_counters/%s",
3360 : : priv->sh->ibdev_path,
3361 : : ctr_name);
3362 : : fd = open(path1, O_RDONLY);
3363 : : }
3364 [ # # ]: 0 : if (fd != -1) {
3365 : 0 : char buf[21] = {'\0'};
3366 : : ssize_t n = read(fd, buf, sizeof(buf));
3367 : :
3368 : 0 : close(fd);
3369 [ # # ]: 0 : if (n != -1) {
3370 : 0 : *stat = strtoull(buf, NULL, 10);
3371 : 0 : return 0;
3372 : : }
3373 : : }
3374 : : }
3375 : 0 : *stat = 0;
3376 : 0 : return 1;
3377 : : }
3378 : :
3379 : : /**
3380 : : * Remove a MAC address from device
3381 : : *
3382 : : * @param dev
3383 : : * Pointer to Ethernet device structure.
3384 : : * @param index
3385 : : * MAC address index.
3386 : : */
3387 : : void
3388 : 0 : mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
3389 : : {
3390 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3391 : 0 : const int vf = priv->sh->dev_cap.vf;
3392 : :
3393 [ # # ]: 0 : if (vf)
3394 : 0 : mlx5_nl_mac_addr_remove(priv->nl_socket_route,
3395 : : mlx5_ifindex(dev),
3396 : 0 : &dev->data->mac_addrs[index], index);
3397 [ # # ]: 0 : if (index < MLX5_MAX_MAC_ADDRESSES)
3398 : 0 : BITFIELD_RESET(priv->mac_own, index);
3399 : 0 : }
3400 : :
3401 : : /**
3402 : : * Adds a MAC address to the device
3403 : : *
3404 : : * @param dev
3405 : : * Pointer to Ethernet device structure.
3406 : : * @param mac_addr
3407 : : * MAC address to register.
3408 : : * @param index
3409 : : * MAC address index.
3410 : : *
3411 : : * @return
3412 : : * 0 on success, a negative errno value otherwise
3413 : : */
3414 : : int
3415 : 0 : mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
3416 : : uint32_t index)
3417 : : {
3418 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3419 : 0 : const int vf = priv->sh->dev_cap.vf;
3420 : : int ret = 0;
3421 : :
3422 [ # # ]: 0 : if (vf)
3423 : 0 : ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
3424 : : mlx5_ifindex(dev),
3425 : : mac, index);
3426 [ # # ]: 0 : if (!ret)
3427 : 0 : BITFIELD_SET(priv->mac_own, index);
3428 : :
3429 : 0 : return ret;
3430 : : }
3431 : :
3432 : : /**
3433 : : * Modify a VF MAC address
3434 : : *
3435 : : * @param priv
3436 : : * Pointer to device private data.
3437 : : * @param mac_addr
3438 : : * MAC address to modify into.
3439 : : * @param iface_idx
3440 : : * Net device interface index
3441 : : * @param vf_index
3442 : : * VF index
3443 : : *
3444 : : * @return
3445 : : * 0 on success, a negative errno value otherwise
3446 : : */
3447 : : int
3448 : 0 : mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
3449 : : unsigned int iface_idx,
3450 : : struct rte_ether_addr *mac_addr,
3451 : : int vf_index)
3452 : : {
3453 : 0 : return mlx5_nl_vf_mac_addr_modify
3454 : : (priv->nl_socket_route, iface_idx, mac_addr, vf_index);
3455 : : }
3456 : :
3457 : : /**
3458 : : * Set device promiscuous mode
3459 : : *
3460 : : * @param dev
3461 : : * Pointer to Ethernet device structure.
3462 : : * @param enable
3463 : : * 0 - promiscuous is disabled, otherwise - enabled
3464 : : *
3465 : : * @return
3466 : : * 0 on success, a negative error value otherwise
3467 : : */
3468 : : int
3469 : 0 : mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
3470 : : {
3471 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3472 : :
3473 : 0 : return mlx5_nl_promisc(priv->nl_socket_route,
3474 : : mlx5_ifindex(dev), !!enable);
3475 : : }
3476 : :
3477 : : /**
3478 : : * Set device promiscuous mode
3479 : : *
3480 : : * @param dev
3481 : : * Pointer to Ethernet device structure.
3482 : : * @param enable
3483 : : * 0 - all multicase is disabled, otherwise - enabled
3484 : : *
3485 : : * @return
3486 : : * 0 on success, a negative error value otherwise
3487 : : */
3488 : : int
3489 : 0 : mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
3490 : : {
3491 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3492 : :
3493 : 0 : return mlx5_nl_allmulti(priv->nl_socket_route,
3494 : : mlx5_ifindex(dev), !!enable);
3495 : : }
3496 : :
3497 : : /**
3498 : : * Flush device MAC addresses
3499 : : *
3500 : : * @param dev
3501 : : * Pointer to Ethernet device structure.
3502 : : *
3503 : : */
3504 : : void
3505 : 0 : mlx5_os_mac_addr_flush(struct rte_eth_dev *dev)
3506 : : {
3507 : 0 : struct mlx5_priv *priv = dev->data->dev_private;
3508 : 0 : const int vf = priv->sh->dev_cap.vf;
3509 : :
3510 : 0 : mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
3511 : : dev->data->mac_addrs,
3512 : 0 : MLX5_MAX_MAC_ADDRESSES, priv->mac_own, vf);
3513 : 0 : }
3514 : :
3515 : : static bool
3516 : : mlx5_hws_is_supported(struct mlx5_dev_ctx_shared *sh)
3517 : : {
3518 [ # # # # ]: 0 : return (sh->cdev->config.devx &&
3519 : : sh->cdev->config.hca_attr.wqe_based_flow_table_sup);
3520 : : }
3521 : :
3522 : : static bool
3523 : : mlx5_sws_is_any_supported(struct mlx5_dev_ctx_shared *sh)
3524 : : {
3525 : : struct mlx5_common_device *cdev = sh->cdev;
3526 : : struct mlx5_hca_attr *hca_attr = &cdev->config.hca_attr;
3527 : :
3528 [ # # # # ]: 0 : if (hca_attr->rx_sw_owner_v2 || hca_attr->rx_sw_owner)
3529 : : return true;
3530 : :
3531 [ # # # # ]: 0 : if (hca_attr->tx_sw_owner_v2 || hca_attr->tx_sw_owner)
3532 : : return true;
3533 : :
3534 [ # # # # : 0 : if (hca_attr->eswitch_manager && (hca_attr->esw_sw_owner_v2 || hca_attr->esw_sw_owner))
# # # # ]
3535 : 0 : return true;
3536 : :
3537 : : return false;
3538 : : }
3539 : :
3540 : : /**
3541 : : * Initialize default shared configuration for arguments related to flow engine.
3542 : : *
3543 : : * @param[in] sh
3544 : : * Pointer to shared configuration.
3545 : : * @param[in] sh
3546 : : * Pointer to shared device context.
3547 : : */
3548 : : void
3549 [ # # ]: 0 : mlx5_os_default_flow_config(struct mlx5_sh_config *config, struct mlx5_dev_ctx_shared *sh)
3550 : : {
3551 : : bool hws_is_supported = mlx5_hws_is_supported(sh);
3552 : : bool sws_is_supported = mlx5_sws_is_any_supported(sh);
3553 : :
3554 [ # # ]: 0 : if (!sws_is_supported && hws_is_supported)
3555 : 0 : config->dv_flow_en = 2;
3556 : : else
3557 : 0 : config->dv_flow_en = 1;
3558 : :
3559 [ # # ]: 0 : if (config->dv_flow_en == 2)
3560 : 0 : config->allow_duplicate_pattern = 0;
3561 : : else
3562 : 0 : config->allow_duplicate_pattern = 1;
3563 : 0 : }
3564 : :
3565 : : static bool
3566 : 0 : mlx5_kvargs_is_used(struct mlx5_kvargs_ctrl *mkvlist, const char *key)
3567 : : {
3568 : : const struct rte_kvargs_pair *pair;
3569 : : uint32_t i;
3570 : :
3571 [ # # ]: 0 : for (i = 0; i < mkvlist->kvlist->count; ++i) {
3572 : : pair = &mkvlist->kvlist->pairs[i];
3573 [ # # # # ]: 0 : if (strcmp(pair->key, key) == 0 && mkvlist->is_used[i])
3574 : : return true;
3575 : : }
3576 : : return false;
3577 : : }
3578 : :
3579 [ # # ]: 0 : void mlx5_os_fixup_flow_en(struct mlx5_sh_config *config,
3580 : : struct mlx5_dev_ctx_shared *sh)
3581 : : {
3582 : : bool hws_is_supported = mlx5_hws_is_supported(sh);
3583 : : bool sws_is_supported = mlx5_sws_is_any_supported(sh);
3584 : :
3585 : : /* Inform user if DV flow is not supported. */
3586 [ # # # # ]: 0 : if (config->dv_flow_en == 1 && !sws_is_supported && hws_is_supported) {
3587 : 0 : DRV_LOG(WARNING, "DV flow is not supported. Changing to HWS mode.");
3588 : 0 : config->dv_flow_en = 2;
3589 : : }
3590 : 0 : }
3591 : :
3592 : : void
3593 : 0 : mlx5_os_fixup_duplicate_pattern(struct mlx5_sh_config *config,
3594 : : struct mlx5_kvargs_ctrl *mkvlist,
3595 : : const char *key)
3596 : : {
3597 : : /* Handle allow_duplicate_pattern based on final dv_flow_en mode.
3598 : : * HWS mode (dv_flow_en=2) doesn't support duplicate patterns.
3599 : : * Warn only if user explicitly requested an incompatible setting.
3600 : : */
3601 [ # # # # ]: 0 : bool allow_dup_pattern_set = mkvlist != NULL &&
3602 : 0 : mlx5_kvargs_is_used(mkvlist, key);
3603 [ # # ]: 0 : if (config->dv_flow_en == 2) {
3604 [ # # # # ]: 0 : if (config->allow_duplicate_pattern == 1 && allow_dup_pattern_set)
3605 : 0 : DRV_LOG(WARNING, "Duplicate pattern is not supported with HWS. Disabling it.");
3606 : 0 : config->allow_duplicate_pattern = 0;
3607 [ # # ]: 0 : } else if (!allow_dup_pattern_set) {
3608 : : /* Non-HWS mode: set default to 1 only if not explicitly set by user */
3609 : 0 : config->allow_duplicate_pattern = 1;
3610 : : }
3611 : 0 : }
|