/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2024 Intel Corporation
 */

#ifndef _COMMON_INTEL_RX_H_
#define _COMMON_INTEL_RX_H_

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_vect.h>

#include "desc.h"

#define CI_RX_MAX_BURST 32
#define CI_RX_MAX_NSEG 2
#define CI_VPMD_RX_BURST 32
#define CI_VPMD_DESCS_PER_LOOP 4
#define CI_VPMD_DESCS_PER_LOOP_WIDE 8
#define CI_VPMD_RX_REARM_THRESH 64

struct ci_rx_queue;

struct ci_rx_entry {
	struct rte_mbuf *mbuf; /* mbuf associated with RX descriptor. */
};

struct ci_rx_entry_sc {
	struct rte_mbuf *fbuf; /* First segment of the fragmented packet. */
};

typedef void (*ci_rx_release_mbufs_t)(struct ci_rx_queue *rxq);

/**
 * Structure associated with each RX queue.
 */
struct ci_rx_queue {
	struct rte_mempool *mp; /**< mbuf pool to populate RX ring. */
	union { /* RX ring virtual address */
		volatile union ixgbe_adv_rx_desc *ixgbe_rx_ring;
		volatile union ci_rx_desc *rx_ring;
		volatile union ci_rx_flex_desc *rx_flex_ring;
	};
	volatile uint8_t *qrx_tail; /**< register address of tail */
	struct ci_rx_entry *sw_ring; /**< address of RX software ring. */
	struct ci_rx_entry_sc *sw_sc_ring; /**< address of scattered RX software ring. */
	rte_iova_t rx_ring_phys_addr; /**< RX ring DMA address. */
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
	/** hold packets to return to application */
	struct rte_mbuf *rx_stage[CI_RX_MAX_BURST * 2];
	uint16_t nb_rx_desc; /**< number of RX descriptors. */
	uint16_t rx_tail; /**< current value of tail register. */
	uint16_t rx_nb_avail; /**< number of staged packets ready to return to the app */
	uint16_t nb_rx_hold; /**< number of held free RX descriptors. */
	uint16_t rx_next_avail; /**< index of next staged packet to return to the app */
	uint16_t rx_free_thresh; /**< max free RX desc to hold. */
	uint16_t rx_free_trigger; /**< triggers RX buffer allocation */
	uint16_t rxrearm_nb; /**< number of descriptors remaining to be re-armed */
	uint16_t rxrearm_start; /**< index at which re-arming starts */
	uint16_t queue_id; /**< RX queue index. */
	uint16_t port_id; /**< Device port identifier. */
	uint16_t reg_idx; /**< RX queue register index. */
	uint16_t rx_buf_len; /* The packet buffer size */
	uint16_t rx_hdr_len; /* The header buffer size */
	uint16_t max_pkt_len; /* Maximum packet length */
	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
	bool q_set; /**< indicates whether the RX queue has been configured */
	bool rx_deferred_start; /**< queue is not started on dev start. */
	bool fdir_enabled; /**< true when FDIR is enabled */
	bool vector_rx; /**< indicates that vector RX is in use */
	bool drop_en; /**< if true, drop packets if no descriptors are available. */
	uint64_t mbuf_initializer; /**< value to init mbufs */
	uint64_t offloads; /**< Rx offloads with RTE_ETH_RX_OFFLOAD_* */
	uint32_t rxdid; /**< RX descriptor format ID. */
	uint32_t proto_xtr; /* protocol extraction type */
	uint64_t xtr_ol_flag; /* flexible descriptor metadata extraction offload flag */
	ptrdiff_t xtr_field_offs; /* protocol extraction metadata offset */
	uint64_t hw_time_update; /**< Last time HW timestamp was updated */
	/** dummy mbuf, allocated for wraparound when scanning the HW ring */
	struct rte_mbuf fake_mbuf;
	union { /* the VSI this queue belongs to */
		struct i40e_vsi *i40e_vsi;
		struct ice_vsi *ice_vsi;
		struct iavf_vsi *iavf_vsi;
	};
	const struct rte_memzone *mz;
	union {
		struct { /* ixgbe specific values */
			/** flags to set in mbuf when a VLAN is detected. */
			uint64_t vlan_flags;
			/** Packet type mask for different NICs. */
			uint16_t pkt_type_mask;
			/** indicates that the IPsec RX feature is in use */
			uint8_t using_ipsec;
			/** UDP frames with a 0 checksum can be marked as checksum errors. */
			uint8_t rx_udp_csum_zero_err;
		};
		struct { /* i40e specific values */
			uint8_t hs_mode; /**< Header Split mode */
			uint8_t dcb_tc; /**< Traffic class of the RX queue */
		};
		struct { /* ice specific values */
			ci_rx_release_mbufs_t rx_rel_mbufs; /**< release mbuf function */
			/** holds buffer split information */
			struct rte_eth_rxseg_split rxseg[CI_RX_MAX_NSEG];
			struct ci_rx_entry *sw_split_buf; /**< Buffer split SW ring */
			uint32_t rxseg_nb; /**< number of buffer split segments */
			uint32_t time_high; /* high 32 bits of hardware timestamp register */
			uint32_t hw_time_high; /* high 32 bits of timestamp */
			uint32_t hw_time_low; /* low 32 bits of timestamp */
			int ts_offset; /* dynamic mbuf timestamp field offset */
			uint64_t ts_flag; /* dynamic mbuf timestamp flag */
		};
		struct { /* iavf specific values */
			const struct iavf_rxq_ops *ops; /**< queue ops */
			struct iavf_rx_queue_stats *stats; /**< per-queue stats */
			uint64_t phc_time; /**< HW timestamp */
			uint8_t rel_mbufs_type; /**< type of release mbuf function */
			uint8_t rx_flags; /**< Rx VLAN tag location flags */
#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(1)
		};
	};
};

struct ci_rx_path_features_extra {
	bool scattered;
	bool flex_desc;
	bool bulk_alloc;
	bool disabled;
};

struct ci_rx_path_features {
	uint32_t rx_offloads;
	enum rte_vect_max_simd simd_width;
	struct ci_rx_path_features_extra extra;
};

struct ci_rx_path_info {
	eth_rx_burst_t pkt_burst;
	const char *info;
	struct ci_rx_path_features features;
};

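/**
 * Reassemble a burst of receive buffers into complete, possibly
 * multi-segment, packets.
 *
 * Each entry of split_flags marks whether the matching rx_bufs entry is
 * continued in the next buffer. Completed packets are compacted to the front
 * of rx_bufs; a packet still open at the end of the burst is carried over to
 * the next call through pkt_first_seg/pkt_last_seg. crc_len bytes are
 * stripped from the tail of each reassembled multi-segment packet.
 *
 * @return
 *   The number of completed packets stored in rx_bufs.
 */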
static inline uint16_t
ci_rx_reassemble_packets(struct rte_mbuf **rx_bufs, uint16_t nb_bufs, uint8_t *split_flags,
		struct rte_mbuf **pkt_first_seg, struct rte_mbuf **pkt_last_seg,
		const uint8_t crc_len)
{
	struct rte_mbuf *pkts[CI_RX_MAX_BURST] = {0}; /* finished pkts */
	struct rte_mbuf *start = *pkt_first_seg;
	struct rte_mbuf *end = *pkt_last_seg;
	unsigned int pkt_idx, buf_idx;

	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
		if (end) {
			/* processing a split packet */
			end->next = rx_bufs[buf_idx];
			rx_bufs[buf_idx]->data_len += crc_len;

			start->nb_segs++;
			start->pkt_len += rx_bufs[buf_idx]->data_len;
			end = end->next;

			if (!split_flags[buf_idx]) {
				/* it's the last buffer of the split packet */
				start->hash = end->hash;
				start->vlan_tci = end->vlan_tci;
				start->ol_flags = end->ol_flags;
				/* we need to strip crc for the whole packet */
				start->pkt_len -= crc_len;
				if (end->data_len > crc_len) {
					end->data_len -= crc_len;
				} else {
					/* free up last mbuf */
					struct rte_mbuf *secondlast = start;

					start->nb_segs--;
					while (secondlast->next != end)
						secondlast = secondlast->next;
					secondlast->data_len -= (crc_len - end->data_len);
					secondlast->next = NULL;
					rte_pktmbuf_free_seg(end);
				}
				pkts[pkt_idx++] = start;
				start = NULL;
				end = NULL;
			}
		} else {
			/* not processing a split packet */
			if (!split_flags[buf_idx]) {
				/* not a split packet, save and skip */
				pkts[pkt_idx++] = rx_bufs[buf_idx];
				continue;
			}
			start = rx_bufs[buf_idx];
			end = start;
			rx_bufs[buf_idx]->data_len += crc_len;
			rx_bufs[buf_idx]->pkt_len += crc_len;
		}
	}

	/* save the partial packet for next time */
	*pkt_first_seg = start;
	*pkt_last_seg = end;
	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
	return pkt_idx;
}

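/**
 * Compute the template value for ci_rx_queue::mbuf_initializer.
 *
 * Builds a default mbuf for the given port (one segment, data offset at
 * RTE_PKTMBUF_HEADROOM, reference count of 1) and returns the first 64 bits
 * of its rearm data, which RX paths can use to re-initialize the rearm block
 * of an mbuf in a single store.
 */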
static inline uint64_t
ci_rxq_mbuf_initializer(uint16_t port_id)
{
	struct rte_mbuf mb_def = {
		.nb_segs = 1,
		.data_off = RTE_PKTMBUF_HEADROOM,
		.port = port_id,
	};
	rte_mbuf_refcnt_set(&mb_def, 1);

	return mb_def.rearm_data[0];
}

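/*
 * A minimal sketch (not part of this header; the helper name is illustrative)
 * of how the cached template is typically consumed: a single 64-bit store
 * resets data_off, refcnt, nb_segs and port of a freshly allocated mbuf all
 * at once.
 */
static inline void
ci_rxq_example_rearm_mbuf(const struct ci_rx_queue *rxq, struct rte_mbuf *mb)
{
	/* reset the whole rearm block from the per-queue template */
	mb->rearm_data[0] = rxq->mbuf_initializer;
}
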
/* basic checks for a vector-driver capable queue.
 * Individual drivers may have further tests beyond this.
 * For example, nb_desc == 1024 with rx_free_thresh == 32 passes these checks,
 * while nb_desc == 1000 (not a power of two) or rx_free_thresh == 48
 * (1024 % 48 != 0) does not.
 */
static inline bool
ci_rxq_vec_capable(uint16_t nb_desc, uint16_t rx_free_thresh, uint64_t offloads)
{
	if (!rte_is_power_of_2(nb_desc) ||
			rx_free_thresh < CI_RX_MAX_BURST ||
			(nb_desc % rx_free_thresh) != 0)
		return false;

	/* no driver supports timestamping or buffer split on vector path */
	if ((offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ||
			(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT))
		return false;

	return true;
}

/**
 * Select the best matching Rx path based on features.
 *
 * @param req_features
 *   The requested features for the Rx path.
 * @param infos
 *   Array of information about the available Rx paths.
 * @param num_paths
 *   Number of available paths in the infos array.
 * @param default_path
 *   Index of the default path to use if no suitable path is found.
 *
 * @return
 *   The index in the infos array of the path that best matches the requested
 *   features, or default_path if no suitable path is found.
 */
static inline int
ci_rx_path_select(struct ci_rx_path_features req_features,
		const struct ci_rx_path_info *infos,
		int num_paths,
		int default_path)
{
	int i, idx = default_path;
	const struct ci_rx_path_features *current_features = NULL;

	for (i = 0; i < num_paths; i++) {
		const struct ci_rx_path_features *path_features = &infos[i].features;

		/* Do not select a disabled rx path. */
		if (path_features->extra.disabled)
			continue;

		/* If requested, ensure the path uses the flexible descriptor. */
		if (path_features->extra.flex_desc != req_features.extra.flex_desc)
			continue;

		/* If requested, ensure the path supports scattered RX. */
		if (path_features->extra.scattered != req_features.extra.scattered)
			continue;

		/* Do not use a bulk alloc path if not requested. */
		if (path_features->extra.bulk_alloc && !req_features.extra.bulk_alloc)
			continue;

		/* Ensure the path supports the requested RX offloads. */
		if ((path_features->rx_offloads & req_features.rx_offloads) !=
				req_features.rx_offloads)
			continue;

		/* Ensure the path's SIMD width is compatible with the requested width. */
		if (path_features->simd_width > req_features.simd_width)
			continue;

		/* Do not select the path if it is less suitable than the current path. */
		if (current_features != NULL) {
			/* Do not select paths with lower SIMD width than the current path. */
			if (path_features->simd_width < current_features->simd_width)
				continue;
			/* Do not select paths with more offloads enabled than the current path. */
			if (rte_popcount32(path_features->rx_offloads) >
					rte_popcount32(current_features->rx_offloads))
				continue;
			/* Do not select paths without bulk alloc support if requested and the
			 * current path already meets this requirement.
			 */
			if (!path_features->extra.bulk_alloc && req_features.extra.bulk_alloc &&
					current_features->extra.bulk_alloc)
				continue;
		}

		/* Finally, select the path since it has met all the requirements. */
		idx = i;
		current_features = &infos[idx].features;
	}

	return idx;
}

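/*
 * A minimal usage sketch for ci_rx_path_select(), compiled out by default.
 * The path table, burst function names and feature choices below are
 * hypothetical, not part of this header; RTE_VECT_SIMD_* are assumed to be
 * the enum rte_vect_max_simd members from rte_vect.h.
 */
#ifdef CI_RX_PATH_SELECT_EXAMPLE
uint16_t example_rx_burst_scalar(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t example_rx_burst_avx2(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

enum { EXAMPLE_RX_SCALAR, EXAMPLE_RX_AVX2 };

static const struct ci_rx_path_info example_rx_paths[] = {
	[EXAMPLE_RX_SCALAR] = {
		.pkt_burst = example_rx_burst_scalar,
		.info = "Scalar",
		.features = {
			/* scalar path: supports checksum offload, no SIMD */
			.rx_offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
			.simd_width = RTE_VECT_SIMD_DISABLED,
		},
	},
	[EXAMPLE_RX_AVX2] = {
		.pkt_burst = example_rx_burst_avx2,
		.info = "Vector AVX2",
		.features = {
			/* vector path: no offloads, 256-bit SIMD */
			.rx_offloads = 0,
			.simd_width = RTE_VECT_SIMD_256,
		},
	},
};

/* Pick the burst function for the requested offloads and SIMD width,
 * falling back to the scalar path when nothing better matches.
 */
static inline eth_rx_burst_t
example_rx_select_burst(uint32_t rx_offloads, enum rte_vect_max_simd simd_width)
{
	struct ci_rx_path_features req = {
		.rx_offloads = rx_offloads,
		.simd_width = simd_width,
	};
	int idx = ci_rx_path_select(req, example_rx_paths,
			RTE_DIM(example_rx_paths), EXAMPLE_RX_SCALAR);

	return example_rx_paths[idx].pkt_burst;
}
#endif /* CI_RX_PATH_SELECT_EXAMPLE */
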
#endif /* _COMMON_INTEL_RX_H_ */