DPDK  25.11.0
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #include <stdint.h>
149 
150 /* Use this macro to check if LRO API is supported */
151 #define RTE_ETHDEV_HAS_LRO_SUPPORT
152 
153 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
154 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
155 #define RTE_ETHDEV_DEBUG_RX
156 #define RTE_ETHDEV_DEBUG_TX
157 #endif
158 
159 #include <rte_cman.h>
160 #include <rte_compat.h>
161 #include <rte_log.h>
162 #include <rte_interrupts.h>
163 #include <rte_dev.h>
164 #include <rte_devargs.h>
165 #include <rte_bitops.h>
166 #include <rte_errno.h>
167 #include <rte_common.h>
168 #include <rte_config.h>
169 #include <rte_power_intrinsics.h>
170 
171 #include "rte_ethdev_trace_fp.h"
172 #include "rte_dev_info.h"
173 
174 #ifdef __cplusplus
175 extern "C" {
176 #endif
177 
178 extern int rte_eth_dev_logtype;
179 #define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
180 
181 #define RTE_ETHDEV_LOG_LINE(level, ...) \
182  RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
183 
184 struct rte_mbuf;
185 
202 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
203 
219 
233 
/*
 * Iterate over the port ids of all Ethernet devices matching the given
 * devargs pattern. Initializes `iter` with rte_eth_iterator_init() and
 * advances with rte_eth_iterator_next(); the loop ends when the returned
 * id reaches RTE_MAX_ETHPORTS.
 */
247 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
248  for (rte_eth_iterator_init(iter, devargs), \
249  id = rte_eth_iterator_next(iter); \
250  id != RTE_MAX_ETHPORTS; \
251  id = rte_eth_iterator_next(iter))
252 
263  uint64_t ipackets;
264  uint64_t opackets;
265  uint64_t ibytes;
266  uint64_t obytes;
271  uint64_t imissed;
272  uint64_t ierrors;
273  uint64_t oerrors;
274  uint64_t rx_nombuf;
275 };
276 
280 #define RTE_ETH_LINK_SPEED_AUTONEG 0
281 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
282 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
283 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
284 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
285 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
286 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
287 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
288 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
289 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
290 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
291 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
292 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
293 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
294 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
295 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
296 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
297 #define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
298 #define RTE_ETH_LINK_SPEED_800G RTE_BIT32(17)
304 #define RTE_ETH_SPEED_NUM_NONE 0
305 #define RTE_ETH_SPEED_NUM_10M 10
306 #define RTE_ETH_SPEED_NUM_100M 100
307 #define RTE_ETH_SPEED_NUM_1G 1000
308 #define RTE_ETH_SPEED_NUM_2_5G 2500
309 #define RTE_ETH_SPEED_NUM_5G 5000
310 #define RTE_ETH_SPEED_NUM_10G 10000
311 #define RTE_ETH_SPEED_NUM_20G 20000
312 #define RTE_ETH_SPEED_NUM_25G 25000
313 #define RTE_ETH_SPEED_NUM_40G 40000
314 #define RTE_ETH_SPEED_NUM_50G 50000
315 #define RTE_ETH_SPEED_NUM_56G 56000
316 #define RTE_ETH_SPEED_NUM_100G 100000
317 #define RTE_ETH_SPEED_NUM_200G 200000
318 #define RTE_ETH_SPEED_NUM_400G 400000
319 #define RTE_ETH_SPEED_NUM_800G 800000
320 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
357 };
358 
/*
 * Link state of an Ethernet port.
 * The union lets the whole state be loaded/stored as one 64-bit atomic
 * value (val64) while the anonymous bit-field struct exposes the
 * individual attributes.
 */
362 struct rte_eth_link {
363  union {
/* Raw 64-bit image of the link state, for atomic access. */
364  RTE_ATOMIC(uint64_t) val64;
365  __extension__
366  struct {
/* Link speed in Mbps: one of RTE_ETH_SPEED_NUM_* (UNKNOWN = UINT32_MAX). */
367  uint32_t link_speed;
/* RTE_ETH_LINK_HALF_DUPLEX (0) or RTE_ETH_LINK_FULL_DUPLEX (1). */
368  uint16_t link_duplex : 1;
/* RTE_ETH_LINK_FIXED (0) or RTE_ETH_LINK_AUTONEG (1). */
369  uint16_t link_autoneg : 1;
/* RTE_ETH_LINK_DOWN (0) or RTE_ETH_LINK_UP (1). */
370  uint16_t link_status : 1;
/* Connector type; 6-bit code — encoding defined elsewhere, confirm there. */
371  uint16_t link_connector : 6;
372  };
373  };
374 };
375 
379 #define RTE_ETH_LINK_HALF_DUPLEX 0
380 #define RTE_ETH_LINK_FULL_DUPLEX 1
381 #define RTE_ETH_LINK_DOWN 0
382 #define RTE_ETH_LINK_UP 1
383 #define RTE_ETH_LINK_FIXED 0
384 #define RTE_ETH_LINK_AUTONEG 1
385 #define RTE_ETH_LINK_MAX_STR_LEN 40
389 #define RTE_ETH_SPEED_LANES_TO_CAPA(x) RTE_BIT32(x)
390 
393  uint32_t speed;
394  uint32_t capa;
395 };
396 
402  uint8_t pthresh;
403  uint8_t hthresh;
404  uint8_t wthresh;
405 };
406 
410 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
411 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
412 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
422 
429 
439 };
440 
450 };
451 
458  uint32_t mtu;
466  uint64_t offloads;
467 
468  uint64_t reserved_64s[2];
469  void *reserved_ptrs[2];
470 };
471 
477  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
480  RTE_ETH_VLAN_TYPE_MAX,
481 };
482 
488  uint64_t ids[64];
489 };
490 
512  RTE_ETH_HASH_FUNCTION_MAX,
513 };
514 
515 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
516 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
517 
535  uint8_t *rss_key;
536  uint8_t rss_key_len;
541  uint64_t rss_hf;
543 };
544 
545 /*
546  * A packet can be identified by hardware as different flow types. Different
547  * NIC hardware may support different flow types.
548  * Basically, the NIC hardware identifies the flow type as deep protocol as
549  * possible, and exclusively. For example, if a packet is identified as
550  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
551  * though it is an actual IPV4 packet.
552  */
553 #define RTE_ETH_FLOW_UNKNOWN 0
554 #define RTE_ETH_FLOW_RAW 1
555 #define RTE_ETH_FLOW_IPV4 2
556 #define RTE_ETH_FLOW_FRAG_IPV4 3
557 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
558 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
559 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
560 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
561 #define RTE_ETH_FLOW_IPV6 8
562 #define RTE_ETH_FLOW_FRAG_IPV6 9
563 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
564 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
565 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
566 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
567 #define RTE_ETH_FLOW_L2_PAYLOAD 14
568 #define RTE_ETH_FLOW_IPV6_EX 15
569 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
570 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
572 #define RTE_ETH_FLOW_PORT 18
573 #define RTE_ETH_FLOW_VXLAN 19
574 #define RTE_ETH_FLOW_GENEVE 20
575 #define RTE_ETH_FLOW_NVGRE 21
576 #define RTE_ETH_FLOW_VXLAN_GPE 22
577 #define RTE_ETH_FLOW_GTPU 23
578 #define RTE_ETH_FLOW_MAX 24
579 
580 /*
581  * Below macros are defined for RSS offload types, they can be used to
582  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
583  */
584 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
585 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
586 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
587 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
588 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
589 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
590 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
591 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
592 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
593 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
594 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
595 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
596 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
597 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
598 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
599 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
600 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
601 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
602 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
603 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
604 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
605 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
606 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
607 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
608 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
609 #define RTE_ETH_RSS_AH RTE_BIT64(28)
610 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
611 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
612 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
613 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
614 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
615 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
616 
629 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
630 
631 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
632 #define RTE_ETH_RSS_IPV6_FLOW_LABEL RTE_BIT64(37)
633 
635 #define RTE_ETH_RSS_IB_BTH RTE_BIT64(38)
636 
637 /*
638  * We use the following macros to combine with above RTE_ETH_RSS_* for
639  * more specific input set selection. These bits are defined starting
640  * from the high end of the 64 bits.
641  * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
642  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
643  * the same level are used simultaneously, it is the same case as none of
644  * them are added.
645  */
646 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
647 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
648 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
649 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
650 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
651 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
652 
653 /*
654  * Only select IPV6 address prefix as RSS input set according to
655  * https://tools.ietf.org/html/rfc6052
656  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
657  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
658  */
659 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
660 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
661 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
662 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
663 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
664 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
665 
666 /*
667  * Use the following macros to combine with the above layers
668  * to choose inner and outer layers or both for RSS computation.
669  * Bits 50 and 51 are reserved for this.
670  */
671 
679 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
680 
685 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
686 
691 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
692 #define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
693 
694 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
695 
706 static inline uint64_t
707 rte_eth_rss_hf_refine(uint64_t rss_hf)
708 {
709  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
710  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
711 
712  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
713  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
714 
715  return rss_hf;
716 }
717 
718 #define RTE_ETH_RSS_IPV6_PRE32 ( \
719  RTE_ETH_RSS_IPV6 | \
720  RTE_ETH_RSS_L3_PRE32)
721 
722 #define RTE_ETH_RSS_IPV6_PRE40 ( \
723  RTE_ETH_RSS_IPV6 | \
724  RTE_ETH_RSS_L3_PRE40)
725 
726 #define RTE_ETH_RSS_IPV6_PRE48 ( \
727  RTE_ETH_RSS_IPV6 | \
728  RTE_ETH_RSS_L3_PRE48)
729 
730 #define RTE_ETH_RSS_IPV6_PRE56 ( \
731  RTE_ETH_RSS_IPV6 | \
732  RTE_ETH_RSS_L3_PRE56)
733 
734 #define RTE_ETH_RSS_IPV6_PRE64 ( \
735  RTE_ETH_RSS_IPV6 | \
736  RTE_ETH_RSS_L3_PRE64)
737 
738 #define RTE_ETH_RSS_IPV6_PRE96 ( \
739  RTE_ETH_RSS_IPV6 | \
740  RTE_ETH_RSS_L3_PRE96)
741 
742 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
743  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
744  RTE_ETH_RSS_L3_PRE32)
745 
746 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
747  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
748  RTE_ETH_RSS_L3_PRE40)
749 
750 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
751  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
752  RTE_ETH_RSS_L3_PRE48)
753 
754 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
755  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
756  RTE_ETH_RSS_L3_PRE56)
757 
758 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
759  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
760  RTE_ETH_RSS_L3_PRE64)
761 
762 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
763  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
764  RTE_ETH_RSS_L3_PRE96)
765 
766 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
767  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
768  RTE_ETH_RSS_L3_PRE32)
769 
770 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
771  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
772  RTE_ETH_RSS_L3_PRE40)
773 
774 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
775  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
776  RTE_ETH_RSS_L3_PRE48)
777 
778 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
779  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
780  RTE_ETH_RSS_L3_PRE56)
781 
782 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
783  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
784  RTE_ETH_RSS_L3_PRE64)
785 
786 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
787  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
788  RTE_ETH_RSS_L3_PRE96)
789 
790 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
791  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
792  RTE_ETH_RSS_L3_PRE32)
793 
794 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
795  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
796  RTE_ETH_RSS_L3_PRE40)
797 
798 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
799  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
800  RTE_ETH_RSS_L3_PRE48)
801 
802 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
803  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
804  RTE_ETH_RSS_L3_PRE56)
805 
806 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
807  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
808  RTE_ETH_RSS_L3_PRE64)
809 
810 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
811  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
812  RTE_ETH_RSS_L3_PRE96)
813 
814 #define RTE_ETH_RSS_IP ( \
815  RTE_ETH_RSS_IPV4 | \
816  RTE_ETH_RSS_FRAG_IPV4 | \
817  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
818  RTE_ETH_RSS_IPV6 | \
819  RTE_ETH_RSS_FRAG_IPV6 | \
820  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
821  RTE_ETH_RSS_IPV6_EX)
822 
823 #define RTE_ETH_RSS_UDP ( \
824  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
825  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
826  RTE_ETH_RSS_IPV6_UDP_EX)
827 
828 #define RTE_ETH_RSS_TCP ( \
829  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
830  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
831  RTE_ETH_RSS_IPV6_TCP_EX)
832 
833 #define RTE_ETH_RSS_SCTP ( \
834  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
835  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
836 
837 #define RTE_ETH_RSS_TUNNEL ( \
838  RTE_ETH_RSS_VXLAN | \
839  RTE_ETH_RSS_GENEVE | \
840  RTE_ETH_RSS_NVGRE)
841 
842 #define RTE_ETH_RSS_VLAN ( \
843  RTE_ETH_RSS_S_VLAN | \
844  RTE_ETH_RSS_C_VLAN)
845 
847 #define RTE_ETH_RSS_PROTO_MASK ( \
848  RTE_ETH_RSS_IPV4 | \
849  RTE_ETH_RSS_FRAG_IPV4 | \
850  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
851  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
852  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
853  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
854  RTE_ETH_RSS_IPV6 | \
855  RTE_ETH_RSS_FRAG_IPV6 | \
856  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
857  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
858  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
859  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
860  RTE_ETH_RSS_L2_PAYLOAD | \
861  RTE_ETH_RSS_IPV6_EX | \
862  RTE_ETH_RSS_IPV6_TCP_EX | \
863  RTE_ETH_RSS_IPV6_UDP_EX | \
864  RTE_ETH_RSS_PORT | \
865  RTE_ETH_RSS_VXLAN | \
866  RTE_ETH_RSS_GENEVE | \
867  RTE_ETH_RSS_NVGRE | \
868  RTE_ETH_RSS_MPLS)
869 
870 /*
871  * Definitions used for redirection table entry size.
872  * Some RSS RETA sizes may not be supported by some drivers, check the
873  * documentation or the description of relevant functions for more details.
874  */
875 #define RTE_ETH_RSS_RETA_SIZE_64 64
876 #define RTE_ETH_RSS_RETA_SIZE_128 128
877 #define RTE_ETH_RSS_RETA_SIZE_256 256
878 #define RTE_ETH_RSS_RETA_SIZE_512 512
879 #define RTE_ETH_RETA_GROUP_SIZE 64
880 
882 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
883 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
884 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
885 #define RTE_ETH_DCB_NUM_QUEUES 128
889 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
890 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
894 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
895 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
896 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
897 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
899 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
900 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
901 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
902 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
903 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
906 /* Definitions used for receive MAC address */
907 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
909 /* Definitions used for unicast hash */
910 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
916 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
918 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
920 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
922 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
924 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
935  uint64_t mask;
937  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
938 };
939 
946  RTE_ETH_8_TCS = 8
947 };
948 
957  RTE_ETH_64_POOLS = 64
958 };
959 
960 /* This structure may be extended in future. */
/* DCB Rx queue configuration. */
961 struct rte_eth_dcb_rx_conf {
/* Number of traffic classes; a value of enum rte_eth_nb_tcs. */
962  enum rte_eth_nb_tcs nb_tcs;
/* Traffic class per user priority (index = user priority 0..7) — confirm mapping direction with driver docs. */
964  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
965 };
966 
/* VMDq + DCB Tx configuration. */
967 struct rte_eth_vmdq_dcb_tx_conf {
/* Number of VMDq transmit pools; a value of enum rte_eth_nb_pools. */
968  enum rte_eth_nb_pools nb_queue_pools;
/* Traffic class per user priority (index = user priority 0..7) — confirm mapping direction with driver docs. */
970  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
971 };
972 
973 struct rte_eth_dcb_tx_conf {
974  enum rte_eth_nb_tcs nb_tcs;
977 };
978 
/* VMDq-only Tx configuration. */
979 struct rte_eth_vmdq_tx_conf {
/* Number of VMDq transmit pools; a value of enum rte_eth_nb_pools. */
980  enum rte_eth_nb_pools nb_queue_pools;
981 };
982 
997  uint8_t default_pool;
998  uint8_t nb_pool_maps;
999  struct {
1000  uint16_t vlan_id;
1001  uint64_t pools;
1005 };
1006 
1028  uint8_t default_pool;
1030  uint8_t nb_pool_maps;
1031  uint32_t rx_mode;
1032  struct {
1033  uint16_t vlan_id;
1034  uint64_t pools;
1036 };
1037 
1048  uint64_t offloads;
1049 
1050  uint16_t pvid;
1051  __extension__
1052  uint8_t
1058 
1059  uint64_t reserved_64s[2];
1060  void *reserved_ptrs[2];
1061 };
1062 
1124  struct rte_mempool *mp;
1125  uint16_t length;
1126  uint16_t offset;
1138  uint32_t proto_hdr;
1139 };
1140 
1148  /* The settings for buffer split offload. */
1149  struct rte_eth_rxseg_split split;
1150  /* The other features settings should be added here. */
1151 };
1152 
1157  struct rte_eth_thresh rx_thresh;
1158  uint16_t rx_free_thresh;
1159  uint8_t rx_drop_en;
1161  uint16_t rx_nseg;
1168  uint16_t share_group;
1169  uint16_t share_qid;
1175  uint64_t offloads;
1184 
1205  uint16_t rx_nmempool;
1207  uint64_t reserved_64s[2];
1208  void *reserved_ptrs[2];
1209 };
1210 
1215  struct rte_eth_thresh tx_thresh;
1216  uint16_t tx_rs_thresh;
1217  uint16_t tx_free_thresh;
1226  uint64_t offloads;
1227 
1228  uint64_t reserved_64s[2];
1229  void *reserved_ptrs[2];
1230 };
1231 
1244 
1249  uint32_t rte_memory:1;
1250 
1251  uint32_t reserved:30;
1252 };
1253 
1262  uint16_t max_nb_queues;
1264  uint16_t max_rx_2_tx;
1266  uint16_t max_tx_2_rx;
1267  uint16_t max_nb_desc;
1270 };
1271 
1272 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1273 
1281  uint16_t port;
1282  uint16_t queue;
1283 };
1284 
1292  uint32_t peer_count:16;
1303  uint32_t tx_explicit:1;
1304 
1316  uint32_t manual_bind:1;
1317 
1330 
1342  uint32_t use_rte_memory:1;
1343 
1354  uint32_t force_memory:1;
1355 
1356  uint32_t reserved:11;
1358  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1359 };
1360 
1365  uint16_t nb_max;
1366  uint16_t nb_min;
1367  uint16_t nb_align;
1377  uint16_t nb_seg_max;
1378 
1390  uint16_t nb_mtu_seg_max;
1391 };
1392 
1401 };
1402 
1409  uint32_t high_water;
1410  uint32_t low_water;
1411  uint16_t pause_time;
1412  uint16_t send_xon;
1413  enum rte_eth_fc_mode mode;
1415  uint8_t autoneg;
1416 };
1417 
1424  struct rte_eth_fc_conf fc;
1425  uint8_t priority;
1426 };
1427 
1438  uint8_t tc_max;
1441 };
1442 
1461  enum rte_eth_fc_mode mode;
1463  struct {
1464  uint16_t tx_qid;
1468  uint8_t tc;
1469  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1470 
1471  struct {
1472  uint16_t pause_time;
1473  uint16_t rx_qid;
1477  uint8_t tc;
1478  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1479 };
1480 
1486  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1487  RTE_ETH_TUNNEL_TYPE_VXLAN,
1488  RTE_ETH_TUNNEL_TYPE_GENEVE,
1489  RTE_ETH_TUNNEL_TYPE_TEREDO,
1490  RTE_ETH_TUNNEL_TYPE_NVGRE,
1491  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1492  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1493  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1494  RTE_ETH_TUNNEL_TYPE_ECPRI,
1495  RTE_ETH_TUNNEL_TYPE_MAX,
1496 };
1497 
1498 #ifdef __cplusplus
1499 }
1500 #endif
1501 
1502 /* Deprecated API file for rte_eth_dev_filter_* functions */
1503 #include "rte_eth_ctrl.h"
1504 
1505 #ifdef __cplusplus
1506 extern "C" {
1507 #endif
1508 
1519  uint16_t udp_port;
1520  uint8_t prot_type;
1521 };
1522 
1528  uint32_t lsc:1;
1530  uint32_t rxq:1;
1532  uint32_t rmv:1;
1533 };
1534 
1535 #define rte_intr_conf rte_eth_intr_conf
1536 
1543  uint32_t link_speeds;
1550  struct rte_eth_rxmode rxmode;
1551  struct rte_eth_txmode txmode;
1552  uint32_t lpbk_mode;
1557  struct {
1558  struct rte_eth_rss_conf rss_conf;
1562  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1566  union {
1568  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1570  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1572  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1577  struct rte_eth_intr_conf intr_conf;
1578 };
1579 
1583 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1584 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1585 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1586 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1587 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1588 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1589 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1590 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1591 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1592 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1593 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1599 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1600 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1601 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1602 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1603 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1604 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1605 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1606 
1607 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1608  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1609  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1610 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1611  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1612  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1613  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1614 
1615 /*
1616  * If new Rx offload capabilities are defined, they also must be
1617  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1618  */
1619 
1623 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1624 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1625 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1626 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1627 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1628 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1629 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1630 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1631 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1632 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1633 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1634 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1635 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1636 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1641 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1643 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1651 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1652 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1658 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1664 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1666 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1672 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1673 /*
1674  * If new Tx offload capabilities are defined, they also must be
1675  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1676  */
1677 
1682 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1684 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1694 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1696 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1698 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1701 /*
1702  * Fallback default preferred Rx/Tx port parameters.
1703  * These are used if an application requests default parameters
1704  * but the PMD does not provide preferred values.
1705  */
1706 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1707 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1708 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1709 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1710 
1717  uint16_t burst_size;
1718  uint16_t ring_size;
1719  uint16_t nb_queues;
1720 };
1721 
1726 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1727 
1732  const char *name;
1733  uint16_t domain_id;
1741  uint16_t port_id;
1747  uint16_t rx_domain;
1748 };
1749 
1757  __extension__
1758  uint32_t multi_pools:1;
1759  uint32_t offset_allowed:1;
1760  uint32_t offset_align_log2:4;
1761  uint16_t max_nseg;
1762  uint16_t reserved;
1763 };
1764 
1777 };
1778 
1799 };
1800 
1807  struct rte_device *device;
1808  const char *driver_name;
1809  unsigned int if_index;
1811  uint16_t min_mtu;
1812  uint16_t max_mtu;
1813  const uint32_t *dev_flags;
1815  uint32_t min_rx_bufsize;
1822  uint32_t max_rx_bufsize;
1823  uint32_t max_rx_pktlen;
1826  uint16_t max_rx_queues;
1827  uint16_t max_tx_queues;
1828  uint32_t max_mac_addrs;
1831  uint16_t max_vfs;
1832  uint16_t max_vmdq_pools;
1843  uint16_t reta_size;
1844  uint8_t hash_key_size;
1845  uint32_t rss_algo_capa;
1850  uint16_t vmdq_queue_base;
1851  uint16_t vmdq_queue_num;
1852  uint16_t vmdq_pool_base;
1853  struct rte_eth_desc_lim rx_desc_lim;
1854  struct rte_eth_desc_lim tx_desc_lim;
1855  uint32_t speed_capa;
1857  uint16_t nb_rx_queues;
1858  uint16_t nb_tx_queues;
1871  uint64_t dev_capa;
1879 
1880  uint64_t reserved_64s[2];
1881  void *reserved_ptrs[2];
1882 };
1883 
1885 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1886 #define RTE_ETH_QUEUE_STATE_STARTED 1
1887 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1895  struct rte_mempool *mp;
1896  struct rte_eth_rxconf conf;
1897  uint8_t scattered_rx;
1898  uint8_t queue_state;
1899  uint16_t nb_desc;
1900  uint16_t rx_buf_size;
1907  uint8_t avail_thresh;
1908 };
1909 
1915  struct rte_eth_txconf conf;
1916  uint16_t nb_desc;
1917  uint8_t queue_state;
1918 };
1919 
1929  struct rte_mbuf **mbuf_ring;
1930  struct rte_mempool *mp;
1931  uint16_t *refill_head;
1932  uint16_t *receive_tail;
1933  uint16_t mbuf_ring_size;
1942 };
1943 
1944 /* Generic Burst mode flag definition, values can be ORed. */
1945 
1951 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1952 
1958  uint64_t flags;
1960 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1962 };
1963 
1965 #define RTE_ETH_XSTATS_NAME_SIZE 64
1966 
1977  uint64_t id;
1978  uint64_t value;
1979 };
1980 
1997 };
1998 
1999 #define RTE_ETH_DCB_NUM_TCS 8
2000 #define RTE_ETH_MAX_VMDQ_POOL 64
2001 
2008  struct {
2009  uint16_t base;
2010  uint16_t nb_queue;
2011  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
2013  struct {
2014  uint16_t base;
2015  uint16_t nb_queue;
2016  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
2017 };
2018 
2024  uint8_t nb_tcs;
2026  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
2029 };
2030 
2041 };
2042 
2043 /* Translate from FEC mode to FEC capa */
2044 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
2045 
2046 /* This macro indicates FEC capa mask */
2047 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
2048 
2049 /* A structure used to get capabilities per link speed */
2050 struct rte_eth_fec_capa {
/* Link speed in Mbps (an RTE_ETH_SPEED_NUM_* value) this entry applies to. */
2051  uint32_t speed;
/* Bitmask of supported FEC modes, bits built with RTE_ETH_FEC_MODE_TO_CAPA(). */
2052  uint32_t capa;
2053 };
2054 
2055 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
2056 
2057 /* Macros to check for valid port */
/*
 * Validate `port_id`; if it is not a valid port, log an ERR message and
 * make the enclosing function return `retval`.
 */
2058 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2059  if (!rte_eth_dev_is_valid_port(port_id)) { \
2060  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2061  return retval; \
2062  } \
2063 } while (0)
2064 
/*
 * Validate `port_id`; if it is not a valid port, log an ERR message and
 * make the enclosing (void) function return.
 */
2065 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2066  if (!rte_eth_dev_is_valid_port(port_id)) { \
2067  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2068  return; \
2069  } \
2070 } while (0)
2071 
/*
 * Rx packet callback type. Called for a given port/queue with `nb_pkts`
 * received mbufs in `pkts` (array capacity `max_pkts`) and the opaque
 * `user_param` supplied at registration. Returns a uint16_t packet count —
 * presumably the number of packets left in `pkts` for the application;
 * confirm with the rte_eth_add_rx_callback() documentation.
 */
2094 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2095  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2096  void *user_param);
2097 
/*
 * Tx packet callback type. Called for a given port/queue with `nb_pkts`
 * mbufs in `pkts` and the opaque `user_param` supplied at registration.
 * Returns a uint16_t packet count — presumably the number of packets to
 * hand to the driver; confirm with the rte_eth_add_tx_callback() docs.
 */
2118 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2119  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2120 
2131 };
2132 
/*
 * Per-device SR-IOV state (accessed via RTE_ETH_DEV_SRIOV(dev)).
 * NOTE(review): field semantics below inferred from names — confirm
 * against the ethdev driver code that fills this structure.
 */
2133 struct rte_eth_dev_sriov {
/* Non-zero when SR-IOV is active on the device. */
2134  uint8_t active;
/* Number of queues per pool. */
2135  uint8_t nb_q_per_pool;
/* Default VMDq pool index. */
2136  uint16_t def_vmdq_idx;
/* Default queue index within the default pool. */
2137  uint16_t def_pool_q_idx;
2138 };
2139 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2140 
2141 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2142 
2143 #define RTE_ETH_DEV_NO_OWNER 0
2144 
2145 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2146 
/* Port ownership record: numeric owner id plus a human-readable name. */
2147 struct rte_eth_dev_owner {
/* Owner identifier; RTE_ETH_DEV_NO_OWNER (0) denotes an unowned port. */
2148  uint64_t id;
/* Owner name buffer — presumably NUL-terminated; confirm with rte_eth_dev_owner_set(). */
2149  char name[RTE_ETH_MAX_OWNER_NAME_LEN];
2150 };
2151 
2157 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2159 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2161 #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2163 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2165 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2167 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2172 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2186 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2187  const uint64_t owner_id);
2188 
/*
 * Iterate over the port ids owned by owner id `o`, using
 * rte_eth_find_next_owned_by(); the loop ends once the returned id
 * reaches RTE_MAX_ETHPORTS.
 */
2192 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2193  for (p = rte_eth_find_next_owned_by(0, o); \
2194  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2195  p = rte_eth_find_next_owned_by(p + 1, o))
2196 
2205 uint16_t rte_eth_find_next(uint16_t port_id);
2206 
/*
 * Iterate over port ids with owner RTE_ETH_DEV_NO_OWNER, i.e. ports not
 * claimed by any owner.
 */
2210 #define RTE_ETH_FOREACH_DEV(p) \
2211  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2212 
2224 uint16_t
2225 rte_eth_find_next_of(uint16_t port_id_start,
2226  const struct rte_device *parent);
2227 
/*
 * Iterate over the port ids belonging to the given rte_device `parent`,
 * using rte_eth_find_next_of(); the loop ends once the returned id
 * reaches RTE_MAX_ETHPORTS.
 */
2236 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2237  for (port_id = rte_eth_find_next_of(0, parent); \
2238  port_id < RTE_MAX_ETHPORTS; \
2239  port_id = rte_eth_find_next_of(port_id + 1, parent))
2240 
2252 uint16_t
2253 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2254 
/*
 * Iterate over the port ids sharing the same underlying device as
 * `ref_port_id`, using rte_eth_find_next_sibling(); the loop ends once
 * the returned id reaches RTE_MAX_ETHPORTS. Note the reference port
 * itself is also visited.
 */
2265 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2266  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2267  port_id < RTE_MAX_ETHPORTS; \
2268  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2269 
2280 int rte_eth_dev_owner_new(uint64_t *owner_id);
2281 
2292 int rte_eth_dev_owner_set(const uint16_t port_id,
2293  const struct rte_eth_dev_owner *owner);
2294 
2305 int rte_eth_dev_owner_unset(const uint16_t port_id,
2306  const uint64_t owner_id);
2307 
2316 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2317 
2328 int rte_eth_dev_owner_get(const uint16_t port_id,
2329  struct rte_eth_dev_owner *owner);
2330 
2342 
2352 
2364 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2365 
2374 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2375 
2384 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2385 
2397 __rte_experimental
2398 const char *rte_eth_dev_capability_name(uint64_t capability);
2399 
2439 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2440  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2441 
2450 int
2451 rte_eth_dev_is_removed(uint16_t port_id);
2452 
2515 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2516  uint16_t nb_rx_desc, unsigned int socket_id,
2517  const struct rte_eth_rxconf *rx_conf,
2518  struct rte_mempool *mb_pool);
2519 
2547 __rte_experimental
2549  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2550  const struct rte_eth_hairpin_conf *conf);
2551 
2600 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2601  uint16_t nb_tx_desc, unsigned int socket_id,
2602  const struct rte_eth_txconf *tx_conf);
2603 
2629 __rte_experimental
2631  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2632  const struct rte_eth_hairpin_conf *conf);
2633 
2660 __rte_experimental
2661 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2662  size_t len, uint32_t direction);
2663 
2686 __rte_experimental
2687 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2688 
2713 __rte_experimental
2714 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2715 
2731 __rte_experimental
2732 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2733 
2761 __rte_experimental
2762 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2763  uint8_t affinity);
2764 
2777 int rte_eth_dev_socket_id(uint16_t port_id);
2778 
2788 int rte_eth_dev_is_valid_port(uint16_t port_id);
2789 
2806 __rte_experimental
2807 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2808 
2825 __rte_experimental
2826 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2827 
2845 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2846 
2863 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2864 
2882 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2883 
2900 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2901 
2925 int rte_eth_dev_start(uint16_t port_id);
2926 
2940 int rte_eth_dev_stop(uint16_t port_id);
2941 
2954 int rte_eth_dev_set_link_up(uint16_t port_id);
2955 
2965 int rte_eth_dev_set_link_down(uint16_t port_id);
2966 
2977 int rte_eth_dev_close(uint16_t port_id);
2978 
3016 int rte_eth_dev_reset(uint16_t port_id);
3017 
3029 int rte_eth_promiscuous_enable(uint16_t port_id);
3030 
3042 int rte_eth_promiscuous_disable(uint16_t port_id);
3043 
3054 int rte_eth_promiscuous_get(uint16_t port_id);
3055 
3067 int rte_eth_allmulticast_enable(uint16_t port_id);
3068 
3080 int rte_eth_allmulticast_disable(uint16_t port_id);
3081 
3092 int rte_eth_allmulticast_get(uint16_t port_id);
3093 
3111 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
3113 
3128 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
3130 
3144 __rte_experimental
3145 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3146 
3158 __rte_experimental
3160 
3179 __rte_experimental
3180 int rte_eth_link_to_str(char *str, size_t len,
3181  const struct rte_eth_link *eth_link);
3182 
3203 __rte_experimental
3204 int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes);
3205 
3227 __rte_experimental
3228 int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes);
3229 
3252 __rte_experimental
3254  struct rte_eth_speed_lanes_capa *speed_lanes_capa,
3255  unsigned int num);
3256 
3274 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3275 
3287 int rte_eth_stats_reset(uint16_t port_id);
3288 
3318 int rte_eth_xstats_get_names(uint16_t port_id,
3319  struct rte_eth_xstat_name *xstats_names,
3320  unsigned int size);
3321 
3355 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3356  unsigned int n);
3357 
3382 int
3384  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3385  uint64_t *ids);
3386 
3411 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3412  uint64_t *values, unsigned int size);
3413 
3433 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3434  uint64_t *id);
3435 
3450 __rte_experimental
3451 int rte_eth_xstats_set_counter(uint16_t port_id, uint64_t id, int on_off);
3452 
3464 __rte_experimental
3465 int rte_eth_xstats_query_state(uint16_t port_id, uint64_t id);
3466 
3479 int rte_eth_xstats_reset(uint16_t port_id);
3480 
3499 __rte_deprecated
3501  uint16_t tx_queue_id, uint8_t stat_idx);
3502 
3521 __rte_deprecated
3523  uint16_t rx_queue_id,
3524  uint8_t stat_idx);
3525 
3539 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3540 
3561 __rte_experimental
3562 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3563  unsigned int num);
3564 
3584 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3586 
3602 __rte_experimental
3603 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3605 
3626 int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3628 
3668 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3669  uint32_t *ptypes, int num)
3671 
3702 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3703  uint32_t *set_ptypes, unsigned int num);
3704 
3717 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3718 
3736 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3737 
3757 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3758 
3777 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3778  int on);
3779 
3796 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3797  enum rte_vlan_type vlan_type,
3798  uint16_t tag_type);
3799 
3817 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3818 
3832 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3833 
3848 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3849 
3875 __rte_experimental
3876 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3877  uint8_t avail_thresh);
3878 
3905 __rte_experimental
3906 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3907  uint8_t *avail_thresh);
3908 
3909 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3910  void *userdata);
3911 
3917  buffer_tx_error_fn error_callback;
3918  void *error_userdata;
3919  uint16_t size;
3920  uint16_t length;
3922  struct rte_mbuf *pkts[];
3923 };
3924 
3931 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3932  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3933 
3944 int
3945 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3946 
3971 int
3973  buffer_tx_error_fn callback, void *userdata);
3974 
3997 void
3998 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3999  void *userdata);
4000 
4024 void
4025 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
4026  void *userdata);
4027 
4053 int
4054 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
4055 
4088 };
4089 
4109 };
4110 
4129  uint64_t metadata;
4130 };
4131 
4169 };
4170 
4195  uint64_t metadata;
4196 };
4197 
4280 };
4281 
4295 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4296  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4297 
4315 int rte_eth_dev_callback_register(uint16_t port_id,
4316  enum rte_eth_event_type event,
4317  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4318 
4337 int rte_eth_dev_callback_unregister(uint16_t port_id,
4338  enum rte_eth_event_type event,
4339  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4340 
4362 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4363 
4384 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4385 
4403 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4404 
4426 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4427  int epfd, int op, void *data);
4428 
4443 int
4444 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4445 
4459 int rte_eth_led_on(uint16_t port_id);
4460 
4474 int rte_eth_led_off(uint16_t port_id);
4475 
4504 __rte_experimental
4505 int rte_eth_fec_get_capability(uint16_t port_id,
4506  struct rte_eth_fec_capa *speed_fec_capa,
4507  unsigned int num);
4508 
4529 __rte_experimental
4530 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4531 
4555 __rte_experimental
4556 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4557 
4572 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4573  struct rte_eth_fc_conf *fc_conf);
4574 
4589 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4590  struct rte_eth_fc_conf *fc_conf);
4591 
4608  struct rte_eth_pfc_conf *pfc_conf);
4609 
4628 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4629  uint32_t pool);
4630 
4648 __rte_experimental
4650  struct rte_eth_pfc_queue_info *pfc_queue_info);
4651 
4675 __rte_experimental
4677  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4678 
4693 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4694  struct rte_ether_addr *mac_addr);
4695 
4714  struct rte_ether_addr *mac_addr);
4715 
4733 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4734  struct rte_eth_rss_reta_entry64 *reta_conf,
4735  uint16_t reta_size);
4736 
4755 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4756  struct rte_eth_rss_reta_entry64 *reta_conf,
4757  uint16_t reta_size);
4758 
4778 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4779  uint8_t on);
4780 
4799 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4800 
4817 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4818  uint32_t tx_rate);
4819 
4834 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4835  struct rte_eth_rss_conf *rss_conf);
4836 
4852 int
4854  struct rte_eth_rss_conf *rss_conf);
4855 
4868 __rte_experimental
4869 const char *
4871 
4888 __rte_experimental
4889 int
4890 rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4891 
4916 int
4918  struct rte_eth_udp_tunnel *tunnel_udp);
4919 
4939 int
4941  struct rte_eth_udp_tunnel *tunnel_udp);
4942 
4957 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4958  struct rte_eth_dcb_info *dcb_info);
4959 
4960 struct rte_eth_rxtx_callback;
4961 
4987 const struct rte_eth_rxtx_callback *
4988 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4989  rte_rx_callback_fn fn, void *user_param);
4990 
5017 const struct rte_eth_rxtx_callback *
5018 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5019  rte_rx_callback_fn fn, void *user_param);
5020 
5046 const struct rte_eth_rxtx_callback *
5047 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5048  rte_tx_callback_fn fn, void *user_param);
5049 
5083 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5084  const struct rte_eth_rxtx_callback *user_cb);
5085 
5119 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5120  const struct rte_eth_rxtx_callback *user_cb);
5121 
5141 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5142  struct rte_eth_rxq_info *qinfo);
5143 
5163 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5164  struct rte_eth_txq_info *qinfo);
5165 
5186 __rte_experimental
5188  uint16_t queue_id,
5189  struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5190 
5209 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5210  struct rte_eth_burst_mode *mode);
5211 
5230 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5231  struct rte_eth_burst_mode *mode);
5232 
5253 __rte_experimental
5254 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5255  struct rte_power_monitor_cond *pmc);
5256 
5283 __rte_experimental
5284 int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info);
5285 
5304 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5306 
5319 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5320 
5337 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5338 
5355 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5356 
5375 __rte_experimental
5376 int
5377 rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
5379 
5399 __rte_experimental
5400 int
5401 rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5403 
5423 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5424  struct rte_ether_addr *mc_addr_set,
5425  uint32_t nb_mc_addr);
5426 
5439 int rte_eth_timesync_enable(uint16_t port_id);
5440 
5453 int rte_eth_timesync_disable(uint16_t port_id);
5454 
5474  struct timespec *timestamp, uint32_t flags);
5475 
5492  struct timespec *timestamp);
5493 
5511 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5512 
5553 __rte_experimental
5554 int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm);
5555 
5571 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5572 
5591 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5592 
5638 __rte_experimental
5639 int
5640 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5641 
5657 int
5658 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5659 
5676 int
5677 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5678 
5696  uint16_t *nb_rx_desc,
5697  uint16_t *nb_tx_desc);
5698 
5713 int
5714 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5715 
5725 void *
5726 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5727 
5743 __rte_experimental
5745  struct rte_eth_hairpin_cap *cap);
5746 
5756  int pf;
5757  __extension__
5758  union {
5759  int vf;
5760  int sf;
5761  };
5762  uint32_t id_base;
5763  uint32_t id_end;
5764  char name[RTE_DEV_NAME_MAX_LEN];
5765 };
5766 
5774  uint16_t controller;
5775  uint16_t pf;
5776  uint32_t nb_ranges_alloc;
5777  uint32_t nb_ranges;
5779 };
5780 
5804 __rte_experimental
5805 int rte_eth_representor_info_get(uint16_t port_id,
5806  struct rte_eth_representor_info *info);
5807 
5809 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5810 
5812 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5813 
5815 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5816 
5856 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5857 
5859 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5861 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5862 
5873  uint32_t timeout_ms;
5875  uint16_t max_frags;
5880  uint16_t flags;
5881 };
5882 
5903 __rte_experimental
5905  struct rte_eth_ip_reassembly_params *capa);
5906 
5928 __rte_experimental
5929 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5930  struct rte_eth_ip_reassembly_params *conf);
5931 
5961 __rte_experimental
5962 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5963  const struct rte_eth_ip_reassembly_params *conf);
5964 
5972 typedef struct {
5979  uint16_t time_spent;
5981  uint16_t nb_frags;
5983 
6002 __rte_experimental
6003 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
6004 
6028 __rte_experimental
6029 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6030  uint16_t offset, uint16_t num, FILE *file);
6031 
6055 __rte_experimental
6056 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6057  uint16_t offset, uint16_t num, FILE *file);
6058 
6059 
6060 /* Congestion management */
6061 
6071 };
6072 
6089  uint64_t objs_supported;
6094  uint8_t rsvd[8];
6095 };
6096 
6105  enum rte_eth_cman_obj obj;
6107  enum rte_cman_mode mode;
6108  union {
6115  uint16_t rx_queue;
6122  uint8_t rsvd_obj_params[4];
6123  } obj_param;
6124  union {
6130  struct rte_cman_red_params red;
6137  uint8_t rsvd_mode_params[4];
6138  } mode_param;
6139 };
6140 
6158 __rte_experimental
6159 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
6160 
6178 __rte_experimental
6179 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
6180 
6197 __rte_experimental
6198 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
6199 
6220 __rte_experimental
6221 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
6222 
6223 #ifdef __cplusplus
6224 }
6225 #endif
6226 
6227 #include <rte_ethdev_core.h>
6228 
6229 #ifdef __cplusplus
6230 extern "C" {
6231 #endif
6232 
6256 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
6257  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
6258  void *opaque);
6259 
/*
 * Receive a burst of packets from an Rx queue.
 *
 * Looks up the fast-path ops for port_id, fetches the queue data for
 * queue_id and invokes the driver's rx_pkt_burst() handler, filling
 * rx_pkts with up to nb_pkts mbuf pointers.  When built with
 * RTE_ETHDEV_RXTX_CALLBACKS, callbacks registered on the queue are run
 * on the received packets and may change the returned count.  Emits a
 * distinct trace point for empty vs. non-empty bursts.
 *
 * Returns the number of packets received; 0 on an invalid port/queue
 * (those checks are compiled in only under RTE_ETHDEV_DEBUG_RX).
 */
6347 static inline uint16_t
6348 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6349  struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6350 {
6351  uint16_t nb_rx;
6352  struct rte_eth_fp_ops *p;
6353  void *qd;
6354 
6355 #ifdef RTE_ETHDEV_DEBUG_RX
6356  if (port_id >= RTE_MAX_ETHPORTS ||
6357  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6358  RTE_ETHDEV_LOG_LINE(ERR,
6359  "Invalid port_id=%u or queue_id=%u",
6360  port_id, queue_id);
6361  return 0;
6362  }
6363 #endif
6364 
6365  /* fetch pointer to queue data */
6366  p = &rte_eth_fp_ops[port_id];
6367  qd = p->rxq.data[queue_id];
6368 
6369 #ifdef RTE_ETHDEV_DEBUG_RX
6370  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6371 
6372  if (qd == NULL) {
6373  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6374  queue_id, port_id);
6375  return 0;
6376  }
6377 #endif
6378 
/* delegate to the driver's per-queue receive handler */
6379  nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6380 
6382 
6383 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6384  {
6385  void *cb;
6386 
6387  /* rte_memory_order_release memory order was used when the
6388  * call back was inserted into the list.
6389  * Since there is a clear dependency between loading
6390  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6391  * not required.
6392  */
6393  cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6394  rte_memory_order_relaxed);
6395  if (unlikely(cb != NULL))
6396  nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6397  rx_pkts, nb_rx, nb_pkts, cb);
6398  }
6399 #endif
6400 
/* separate trace points so empty polls can be filtered cheaply */
6401  if (unlikely(nb_rx))
6402  rte_ethdev_trace_rx_burst_nonempty(port_id, queue_id, (void **)rx_pkts, nb_rx);
6403  else
6404  rte_ethdev_trace_rx_burst_empty(port_id, queue_id, (void **)rx_pkts);
6405  return nb_rx;
6406 }
6407 
/*
 * Get the number of used descriptors on an Rx queue.
 *
 * Delegates to the driver's rx_queue_count() handler for the queue.
 * With RTE_ETHDEV_DEBUG_RX enabled, returns -EINVAL for an out-of-range
 * port/queue or an unconfigured queue, and -ENODEV for an invalid port.
 * NOTE(review): behavior when the driver does not implement
 * rx_queue_count is not visible from this block — presumably the
 * fp-ops slot holds a stub; confirm against rte_ethdev_core.h.
 */
6425 static inline int
6426 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6427 {
6428  struct rte_eth_fp_ops *p;
6429  void *qd;
6430 
6431 #ifdef RTE_ETHDEV_DEBUG_RX
6432  if (port_id >= RTE_MAX_ETHPORTS ||
6433  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6434  RTE_ETHDEV_LOG_LINE(ERR,
6435  "Invalid port_id=%u or queue_id=%u",
6436  port_id, queue_id);
6437  return -EINVAL;
6438  }
6439 #endif
6440 
6441  /* fetch pointer to queue data */
6442  p = &rte_eth_fp_ops[port_id];
6443  qd = p->rxq.data[queue_id];
6444 
6445 #ifdef RTE_ETHDEV_DEBUG_RX
6446  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6447  if (qd == NULL)
6448  return -EINVAL;
6449 #endif
6450 
6451  return p->rx_queue_count(qd);
6452 }
6453 
6457 #define RTE_ETH_RX_DESC_AVAIL 0
6458 #define RTE_ETH_RX_DESC_DONE 1
6459 #define RTE_ETH_RX_DESC_UNAVAIL 2
/*
 * Check the status of an Rx descriptor at the given offset in the queue.
 *
 * Delegates to the driver's rx_descriptor_status() handler; expected
 * results are the RTE_ETH_RX_DESC_* values defined just above.
 * With RTE_ETHDEV_DEBUG_RX enabled, returns -EINVAL for an out-of-range
 * port/queue and -ENODEV for an invalid port or unconfigured queue.
 */
6495 static inline int
6496 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6497  uint16_t offset)
6498 {
6499  struct rte_eth_fp_ops *p;
6500  void *qd;
6501 
6502 #ifdef RTE_ETHDEV_DEBUG_RX
6503  if (port_id >= RTE_MAX_ETHPORTS ||
6504  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6505  RTE_ETHDEV_LOG_LINE(ERR,
6506  "Invalid port_id=%u or queue_id=%u",
6507  port_id, queue_id);
6508  return -EINVAL;
6509  }
6510 #endif
6511 
6512  /* fetch pointer to queue data */
6513  p = &rte_eth_fp_ops[port_id];
6514  qd = p->rxq.data[queue_id];
6515 
6516 #ifdef RTE_ETHDEV_DEBUG_RX
6517  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6518  if (qd == NULL)
6519  return -ENODEV;
6520 #endif
6521  return p->rx_descriptor_status(qd, offset);
6522 }
6523 
6527 #define RTE_ETH_TX_DESC_FULL 0
6528 #define RTE_ETH_TX_DESC_DONE 1
6529 #define RTE_ETH_TX_DESC_UNAVAIL 2
/*
 * Check the status of a Tx descriptor at the given offset in the queue.
 *
 * Mirror of rte_eth_rx_descriptor_status() for the Tx side: delegates
 * to the driver's tx_descriptor_status() handler; expected results are
 * the RTE_ETH_TX_DESC_* values defined just above.  With
 * RTE_ETHDEV_DEBUG_TX enabled, returns -EINVAL for an out-of-range
 * port/queue and -ENODEV for an invalid port or unconfigured queue.
 */
6565 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6566  uint16_t queue_id, uint16_t offset)
6567 {
6568  struct rte_eth_fp_ops *p;
6569  void *qd;
6570 
6571 #ifdef RTE_ETHDEV_DEBUG_TX
6572  if (port_id >= RTE_MAX_ETHPORTS ||
6573  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6574  RTE_ETHDEV_LOG_LINE(ERR,
6575  "Invalid port_id=%u or queue_id=%u",
6576  port_id, queue_id);
6577  return -EINVAL;
6578  }
6579 #endif
6580 
6581  /* fetch pointer to queue data */
6582  p = &rte_eth_fp_ops[port_id];
6583  qd = p->txq.data[queue_id];
6584 
6585 #ifdef RTE_ETHDEV_DEBUG_TX
6586  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6587  if (qd == NULL)
6588  return -ENODEV;
6589 #endif
6590  return p->tx_descriptor_status(qd, offset);
6591 }
6592 
6612 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6613  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6614 
/*
 * Send a burst of packets on a Tx queue.
 *
 * Runs any Tx callbacks registered on the queue (which may trim or
 * reorder the burst), then delegates to the driver's tx_pkt_burst()
 * handler.  Packets the driver could not accept are marked in the mbuf
 * history as TX_BUSY; ownership of those mbufs stays with the caller.
 *
 * Returns the number of packets actually sent; 0 on an invalid
 * port/queue (checks compiled in only under RTE_ETHDEV_DEBUG_TX).
 */
6686 static inline uint16_t
6687 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6688  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6689 {
6690  struct rte_eth_fp_ops *p;
6691  void *qd;
6692 
6693 #ifdef RTE_ETHDEV_DEBUG_TX
6694  if (port_id >= RTE_MAX_ETHPORTS ||
6695  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6696  RTE_ETHDEV_LOG_LINE(ERR,
6697  "Invalid port_id=%u or queue_id=%u",
6698  port_id, queue_id);
6699  return 0;
6700  }
6701 #endif
6702 
6703  /* fetch pointer to queue data */
6704  p = &rte_eth_fp_ops[port_id];
6705  qd = p->txq.data[queue_id];
6706 
6707 #ifdef RTE_ETHDEV_DEBUG_TX
6708  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6709 
6710  if (qd == NULL) {
6711  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6712  queue_id, port_id);
6713  return 0;
6714  }
6715 #endif
6716 
6717 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6718  {
6719  void *cb;
6720 
6721  /* rte_memory_order_release memory order was used when the
6722  * call back was inserted into the list.
6723  * Since there is a clear dependency between loading
6724  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6725  * not required.
6726  */
6727  cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6728  rte_memory_order_relaxed);
6729  if (unlikely(cb != NULL))
6730  nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6731  tx_pkts, nb_pkts, cb);
6732  }
6733 #endif
6734 
/* remember the post-callback burst size so rejected packets can be marked */
6735  uint16_t requested_pkts = nb_pkts;
6737 
6738  nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6739 
/* record TX_BUSY in the mbuf history for packets the driver refused */
6740  if (requested_pkts > nb_pkts)
6741  rte_mbuf_history_mark_bulk(tx_pkts + nb_pkts,
6742  requested_pkts - nb_pkts, RTE_MBUF_HISTORY_OP_TX_BUSY);
6743 
6744  rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6745  return nb_pkts;
6746 }
6747 
6801 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6802 
/*
 * Prepare a burst of packets for transmission on a Tx queue.
 *
 * Delegates to the driver's tx_pkt_prepare() handler, which may adjust
 * packet headers/offload metadata before an rte_eth_tx_burst() call.
 * On failure paths this function reports errors through rte_errno
 * (ENODEV for a bad port, EINVAL for a bad queue) and returns 0;
 * these checks are compiled in only under RTE_ETHDEV_DEBUG_TX.
 * Returns the number of packets successfully prepared.
 */
6803 static inline uint16_t
6804 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6805  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6806 {
6807  struct rte_eth_fp_ops *p;
6808  void *qd;
6809 
6810 #ifdef RTE_ETHDEV_DEBUG_TX
6811  if (port_id >= RTE_MAX_ETHPORTS ||
6812  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6813  RTE_ETHDEV_LOG_LINE(ERR,
6814  "Invalid port_id=%u or queue_id=%u",
6815  port_id, queue_id);
6816  rte_errno = ENODEV;
6817  return 0;
6818  }
6819 #endif
6820 
6821  /* fetch pointer to queue data */
6822  p = &rte_eth_fp_ops[port_id];
6823  qd = p->txq.data[queue_id];
6824 
6825 #ifdef RTE_ETHDEV_DEBUG_TX
6826  if (!rte_eth_dev_is_valid_port(port_id)) {
6827  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6828  rte_errno = ENODEV;
6829  return 0;
6830  }
6831  if (qd == NULL) {
6832  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6833  queue_id, port_id);
6834  rte_errno = EINVAL;
6835  return 0;
6836  }
6837 #endif
6838 
6840 
6841  return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6842 }
6843 
6844 #else
6845 
6846 /*
6847  * Native NOOP operation for compilation targets which don't require any
6848  * preparation steps, and where a functional NOOP may introduce an unnecessary
6849  * performance drop.
6850  *
6851  * It is generally not a good idea to turn this on globally, and it should
6852  * not be used if the behavior of tx_preparation can change.
6853  */
6854 
6854 
/*
 * NOOP variant of rte_eth_tx_prepare() (RTE_ETHDEV_TX_PREPARE_NOOP):
 * performs no driver checks and reports every packet as ready.
 */
6855 static inline uint16_t
6856 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6857  __rte_unused uint16_t queue_id,
6858  __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6859 {
6860  return nb_pkts;
6861 }
6862 
6863 #endif
6864 
/*
 * Transmit any packets currently held in the Tx buffer.
 *
 * Sends buffer->length packets via rte_eth_tx_burst() and resets the
 * buffer.  Packets the driver did not accept are handed to the buffer's
 * error_callback together with error_userdata; the callback owns their
 * disposition (drop, retry, count — whatever was configured).
 * Returns the number of packets actually sent (0 if the buffer was empty).
 */
6887 static inline uint16_t
6888 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6889  struct rte_eth_dev_tx_buffer *buffer)
6890 {
6891  uint16_t sent;
6892  uint16_t to_send = buffer->length;
6893 
6894  if (to_send == 0)
6895  return 0;
6896 
6897  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6898 
/* buffer is emptied regardless of how many packets the driver took */
6899  buffer->length = 0;
6900 
6901  /* All packets sent, or to be dealt with by callback below */
6902  if (unlikely(sent != to_send))
6903  buffer->error_callback(&buffer->pkts[sent],
6904  (uint16_t)(to_send - sent),
6905  buffer->error_userdata);
6906 
6907  return sent;
6908 }
6909 
/*
 * Enqueue one packet into the Tx buffer, flushing when it becomes full.
 *
 * Appends tx_pkt and, once buffer->length reaches buffer->size, calls
 * rte_eth_tx_buffer_flush().  Returns 0 while merely buffering, or the
 * flush result (number of packets sent) when a flush was triggered.
 * No bounds check beyond the size comparison — the buffer must have been
 * sized via RTE_ETH_TX_BUFFER_SIZE()/rte_eth_tx_buffer_init().
 */
6940 static __rte_always_inline uint16_t
6941 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6942  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6943 {
6944  buffer->pkts[buffer->length++] = tx_pkt;
6945  if (buffer->length < buffer->size)
6946  return 0;
6947 
6948  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6949 }
6950 
/*
 * Recycle used Tx mbufs directly into an Rx queue's mbuf ring,
 * bypassing the mempool (experimental fast path).
 *
 * First asks the Tx-side driver (recycle_tx_mbufs_reuse) how many used
 * mbuf pointers it can hand over for the ring described by
 * recycle_rxq_info, then tells the Rx-side driver
 * (recycle_rx_descriptors_refill) to refill that many descriptors.
 * Tx and Rx may be different ports/queues.  Returns the number of mbufs
 * recycled, or 0 when nothing was available or (under the DEBUG
 * build options) a port/queue argument was invalid.
 */
7004 __rte_experimental
7005 static inline uint16_t
7006 rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
7007  uint16_t tx_port_id, uint16_t tx_queue_id,
7008  struct rte_eth_recycle_rxq_info *recycle_rxq_info)
7009 {
7010  struct rte_eth_fp_ops *p1, *p2;
7011  void *qd1, *qd2;
7012  uint16_t nb_mbufs;
7013 
7014 #ifdef RTE_ETHDEV_DEBUG_TX
7015  if (tx_port_id >= RTE_MAX_ETHPORTS ||
7016  tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
7017  RTE_ETHDEV_LOG_LINE(ERR,
7018  "Invalid tx_port_id=%u or tx_queue_id=%u",
7019  tx_port_id, tx_queue_id);
7020  return 0;
7021  }
7022 #endif
7023 
7024  /* fetch pointer to Tx queue data */
7025  p1 = &rte_eth_fp_ops[tx_port_id];
7026  qd1 = p1->txq.data[tx_queue_id];
7027 
7028 #ifdef RTE_ETHDEV_DEBUG_TX
7029  RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
7030 
7031  if (qd1 == NULL) {
7032  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
7033  tx_queue_id, tx_port_id);
7034  return 0;
7035  }
7036 #endif
7037 
7038 #ifdef RTE_ETHDEV_DEBUG_RX
7039  if (rx_port_id >= RTE_MAX_ETHPORTS ||
7040  rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
7041  RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
7042  rx_port_id, rx_queue_id);
7043  return 0;
7044  }
7045 #endif
7046 
7047  /* fetch pointer to Rx queue data */
7048  p2 = &rte_eth_fp_ops[rx_port_id];
7049  qd2 = p2->rxq.data[rx_queue_id];
7050 
7051 #ifdef RTE_ETHDEV_DEBUG_RX
7052  RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
7053 
7054  if (qd2 == NULL) {
7055  RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
7056  rx_queue_id, rx_port_id);
7057  return 0;
7058  }
7059 #endif
7060 
7061  /* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
7062  * into Rx mbuf ring.
7063  */
7064  nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
7065 
7066  /* If no recycling mbufs, return 0. */
7067  if (nb_mbufs == 0)
7068  return 0;
7069 
7070  /* Replenish the Rx descriptors with the recycling
7071  * into Rx mbuf ring.
7072  */
7073  p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
7074 
7075  return nb_mbufs;
7076 }
7077 
7106 __rte_experimental
7107 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
7109 
/*
 * Get the number of used descriptors on a Tx queue (experimental).
 *
 * Delegates to the driver's tx_queue_count() handler and emits a trace
 * point with the result.  With RTE_ETHDEV_DEBUG_TX enabled, returns
 * -ENODEV for an invalid port and -EINVAL for an out-of-range or
 * unconfigured queue.
 */
7144 __rte_experimental
7145 static inline int
7146 rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
7147 {
7148  struct rte_eth_fp_ops *fops;
7149  void *qd;
7150  int rc;
7151 
7152 #ifdef RTE_ETHDEV_DEBUG_TX
7153  if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
7154  RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
7155  return -ENODEV;
7156  }
7157 
7158  if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
7159  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7160  queue_id, port_id);
7161  return -EINVAL;
7162  }
7163 #endif
7164 
7165  /* Fetch pointer to Tx queue data */
7166  fops = &rte_eth_fp_ops[port_id];
7167  qd = fops->txq.data[queue_id];
7168 
7169 #ifdef RTE_ETHDEV_DEBUG_TX
7170  if (qd == NULL) {
7171  RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7172  queue_id, port_id);
7173  return -EINVAL;
7174  }
7175 #endif
7176  rc = fops->tx_queue_count(qd);
7177  rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
7178  return rc;
7179 }
7180 
7181 #ifdef __cplusplus
7182 }
7183 #endif
7184 
7185 #endif /* _RTE_ETHDEV_H_ */
#define RTE_BIT32(nr)
Definition: rte_bitops.h:44
#define unlikely(x)
rte_cman_mode
Definition: rte_cman.h:16
#define __rte_cache_min_aligned
Definition: rte_common.h:742
#define __rte_unused
Definition: rte_common.h:248
#define __rte_always_inline
Definition: rte_common.h:490
#define __rte_warn_unused_result
Definition: rte_common.h:481
#define rte_errno
Definition: rte_errno.h:29
rte_eth_nb_pools
Definition: rte_ethdev.h:953
@ RTE_ETH_64_POOLS
Definition: rte_ethdev.h:957
@ RTE_ETH_32_POOLS
Definition: rte_ethdev.h:956
@ RTE_ETH_8_POOLS
Definition: rte_ethdev.h:954
@ RTE_ETH_16_POOLS
Definition: rte_ethdev.h:955
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:4136
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_END
Definition: rte_ethdev.h:4140
@ RTE_ETH_EVENT_IPSEC_UNKNOWN
Definition: rte_ethdev.h:4142
@ RTE_ETH_EVENT_IPSEC_MAX
Definition: rte_ethdev.h:4168
@ RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY
Definition: rte_ethdev.h:4156
@ RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
Definition: rte_ethdev.h:4144
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY
Definition: rte_ethdev.h:4161
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_START
Definition: rte_ethdev.h:4138
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
Definition: rte_ethdev.h:4151
@ RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
Definition: rte_ethdev.h:4146
@ RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY
Definition: rte_ethdev.h:4166
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_dev_is_removed(uint16_t port_id)
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:707
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6941
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
int rte_eth_dev_set_link_down(uint16_t port_id)
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:4060
@ RTE_ETH_SUBEVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:4062
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1
Definition: rte_ethdev.h:4072
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48
Definition: rte_ethdev.h:4077
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1
Definition: rte_ethdev.h:4087
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1
Definition: rte_ethdev.h:4082
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1
Definition: rte_ethdev.h:4067
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
rte_eth_event_type
Definition: rte_ethdev.h:4201
@ RTE_ETH_EVENT_RECOVERY_FAILED
Definition: rte_ethdev.h:4278
@ RTE_ETH_EVENT_UNKNOWN
Definition: rte_ethdev.h:4202
@ RTE_ETH_EVENT_VF_MBOX
Definition: rte_ethdev.h:4208
@ RTE_ETH_EVENT_IPSEC
Definition: rte_ethdev.h:4219
@ RTE_ETH_EVENT_INTR_RESET
Definition: rte_ethdev.h:4207
@ RTE_ETH_EVENT_INTR_RMV
Definition: rte_ethdev.h:4210
@ RTE_ETH_EVENT_ERR_RECOVERING
Definition: rte_ethdev.h:4242
@ RTE_ETH_EVENT_MACSEC
Definition: rte_ethdev.h:4209
@ RTE_ETH_EVENT_RECOVERY_SUCCESS
Definition: rte_ethdev.h:4273
@ RTE_ETH_EVENT_DESTROY
Definition: rte_ethdev.h:4218
@ RTE_ETH_EVENT_FLOW_AGED
Definition: rte_ethdev.h:4220
@ RTE_ETH_EVENT_QUEUE_STATE
Definition: rte_ethdev.h:4205
@ RTE_ETH_EVENT_INTR_LSC
Definition: rte_ethdev.h:4203
@ RTE_ETH_EVENT_MAX
Definition: rte_ethdev.h:4279
@ RTE_ETH_EVENT_RX_AVAIL_THRESH
Definition: rte_ethdev.h:4225
@ RTE_ETH_EVENT_NEW
Definition: rte_ethdev.h:4217
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_dev_is_valid_port(uint16_t port_id)
rte_eth_cman_obj
Definition: rte_ethdev.h:6063
@ RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL
Definition: rte_ethdev.h:6070
@ RTE_ETH_CMAN_OBJ_RX_QUEUE
Definition: rte_ethdev.h:6065
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:883
__rte_experimental int rte_eth_speed_lanes_get_capability(uint16_t port_id, struct rte_eth_speed_lanes_capa *speed_lanes_capa, unsigned int num)
__rte_experimental int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
int rte_eth_dev_reset(uint16_t port_id)
#define RTE_ETH_BURST_MODE_INFO_SIZE
Definition: rte_ethdev.h:1960
__rte_experimental int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes)
int rte_eth_allmulticast_disable(uint16_t port_id)
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
rte_eth_dev_state
Definition: rte_ethdev.h:2124
@ RTE_ETH_DEV_ATTACHED
Definition: rte_ethdev.h:2128
@ RTE_ETH_DEV_UNUSED
Definition: rte_ethdev.h:2126
@ RTE_ETH_DEV_REMOVED
Definition: rte_ethdev.h:2130
__rte_deprecated int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
rte_eth_link_connector
Definition: rte_ethdev.h:326
@ RTE_ETH_LINK_CONNECTOR_XAUI
Definition: rte_ethdev.h:340
@ RTE_ETH_LINK_CONNECTOR_SFP_DD
Definition: rte_ethdev.h:346
@ RTE_ETH_LINK_CONNECTOR_GAUI
Definition: rte_ethdev.h:339
@ RTE_ETH_LINK_CONNECTOR_MII
Definition: rte_ethdev.h:330
@ RTE_ETH_LINK_CONNECTOR_XLAUI
Definition: rte_ethdev.h:338
@ RTE_ETH_LINK_CONNECTOR_QSFP
Definition: rte_ethdev.h:347
@ RTE_ETH_LINK_CONNECTOR_SFP28
Definition: rte_ethdev.h:345
@ RTE_ETH_LINK_CONNECTOR_OTHER
Definition: rte_ethdev.h:352
@ RTE_ETH_LINK_CONNECTOR_AUI
Definition: rte_ethdev.h:329
@ RTE_ETH_LINK_CONNECTOR_SGMII
Definition: rte_ethdev.h:334
@ RTE_ETH_LINK_CONNECTOR_SFP
Definition: rte_ethdev.h:343
@ RTE_ETH_LINK_CONNECTOR_QSFP_PLUS
Definition: rte_ethdev.h:348
@ RTE_ETH_LINK_CONNECTOR_TP
Definition: rte_ethdev.h:328
@ RTE_ETH_LINK_CONNECTOR_QSFP28
Definition: rte_ethdev.h:349
@ RTE_ETH_LINK_CONNECTOR_DAC
Definition: rte_ethdev.h:333
@ RTE_ETH_LINK_CONNECTOR_QSFP56
Definition: rte_ethdev.h:350
@ RTE_ETH_LINK_CONNECTOR_QSFP_DD
Definition: rte_ethdev.h:351
@ RTE_ETH_LINK_CONNECTOR_SFI
Definition: rte_ethdev.h:337
@ RTE_ETH_LINK_CONNECTOR_CAUI
Definition: rte_ethdev.h:341
@ RTE_ETH_LINK_CONNECTOR_XFI
Definition: rte_ethdev.h:336
@ RTE_ETH_LINK_CONNECTOR_LAUI
Definition: rte_ethdev.h:342
@ RTE_ETH_LINK_CONNECTOR_FIBER
Definition: rte_ethdev.h:331
@ RTE_ETH_LINK_CONNECTOR_SFP_PLUS
Definition: rte_ethdev.h:344
@ RTE_ETH_LINK_CONNECTOR_QSGMII
Definition: rte_ethdev.h:335
@ RTE_ETH_LINK_CONNECTOR_NONE
Definition: rte_ethdev.h:327
@ RTE_ETH_LINK_CONNECTOR_BNC
Definition: rte_ethdev.h:332
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:6348
rte_eth_fec_mode
Definition: rte_ethdev.h:2035
@ RTE_ETH_FEC_NOFEC
Definition: rte_ethdev.h:2036
@ RTE_ETH_FEC_BASER
Definition: rte_ethdev.h:2038
@ RTE_ETH_FEC_AUTO
Definition: rte_ethdev.h:2037
@ RTE_ETH_FEC_RS
Definition: rte_ethdev.h:2039
@ RTE_ETH_FEC_LLRS
Definition: rte_ethdev.h:2040
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) __rte_warn_unused_result
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1785
@ RTE_ETH_ERROR_HANDLE_MODE_PASSIVE
Definition: rte_ethdev.h:1792
@ RTE_ETH_ERROR_HANDLE_MODE_NONE
Definition: rte_ethdev.h:1787
@ RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE
Definition: rte_ethdev.h:1798
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) __rte_warn_unused_result
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:445
@ RTE_ETH_MQ_TX_DCB
Definition: rte_ethdev.h:447
@ RTE_ETH_MQ_TX_VMDQ_DCB
Definition: rte_ethdev.h:448
@ RTE_ETH_MQ_TX_VMDQ_ONLY
Definition: rte_ethdev.h:449
@ RTE_ETH_MQ_TX_NONE
Definition: rte_ethdev.h:446
int rte_eth_promiscuous_get(uint16_t port_id)
__rte_experimental int rte_eth_xstats_set_counter(uint16_t port_id, uint64_t id, int on_off)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
__rte_experimental int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes)
int rte_eth_dev_set_link_up(uint16_t port_id)
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
uint16_t rte_eth_find_next(uint16_t port_id)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:419
@ RTE_ETH_MQ_RX_DCB_RSS
Definition: rte_ethdev.h:428
@ RTE_ETH_MQ_RX_VMDQ_DCB_RSS
Definition: rte_ethdev.h:437
@ RTE_ETH_MQ_RX_DCB
Definition: rte_ethdev.h:426
@ RTE_ETH_MQ_RX_VMDQ_DCB
Definition: rte_ethdev.h:435
@ RTE_ETH_MQ_RX_VMDQ_RSS
Definition: rte_ethdev.h:433
@ RTE_ETH_MQ_RX_NONE
Definition: rte_ethdev.h:421
@ RTE_ETH_MQ_RX_RSS
Definition: rte_ethdev.h:424
@ RTE_ETH_MQ_RX_VMDQ_ONLY
Definition: rte_ethdev.h:431
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link) __rte_warn_unused_result
int rte_eth_allmulticast_get(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
int rte_eth_allmulticast_enable(uint16_t port_id)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_promiscuous_enable(uint16_t port_id)
rte_eth_representor_type
Definition: rte_ethdev.h:1772
@ RTE_ETH_REPRESENTOR_PF
Definition: rte_ethdev.h:1776
@ RTE_ETH_REPRESENTOR_VF
Definition: rte_ethdev.h:1774
@ RTE_ETH_REPRESENTOR_SF
Definition: rte_ethdev.h:1775
@ RTE_ETH_REPRESENTOR_NONE
Definition: rte_ethdev.h:1773
__rte_experimental int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm)
int rte_eth_timesync_enable(uint16_t port_id)
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:882
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf) __rte_warn_unused_result
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_timesync_disable(uint16_t port_id)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) __rte_warn_unused_result
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
__rte_deprecated int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:2094
__rte_experimental int rte_eth_find_rss_algo(const char *name, uint32_t *algo)
__rte_experimental int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6804
rte_eth_tunnel_type
Definition: rte_ethdev.h:1485
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6687
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
__rte_experimental int rte_eth_xstats_query_state(uint16_t port_id, uint64_t id)
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:4295
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
static __rte_experimental int rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:7146
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:411
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num) __rte_warn_unused_result
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo) __rte_warn_unused_result
rte_eth_fc_mode
Definition: rte_ethdev.h:1396
@ RTE_ETH_FC_TX_PAUSE
Definition: rte_ethdev.h:1399
@ RTE_ETH_FC_RX_PAUSE
Definition: rte_ethdev.h:1398
@ RTE_ETH_FC_NONE
Definition: rte_ethdev.h:1397
@ RTE_ETH_FC_FULL
Definition: rte_ethdev.h:1400
rte_eth_event_macsec_type
Definition: rte_ethdev.h:4094
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:4100
@ RTE_ETH_EVENT_MACSEC_SA_NOT_VALID
Definition: rte_ethdev.h:4108
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:4102
@ RTE_ETH_EVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:4096
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:4104
@ RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR
Definition: rte_ethdev.h:4098
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:4106
int rte_eth_led_on(uint16_t port_id)
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:410
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link) __rte_warn_unused_result
__rte_experimental const char * rte_eth_link_connector_to_str(enum rte_eth_link_connector link_connector)
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_close(uint16_t port_id)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:412
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
__rte_experimental const char * rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6888
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6496
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
static int rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6565
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
static __rte_experimental uint16_t rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
Definition: rte_ethdev.h:7006
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_dev_owner_new(uint64_t *owner_id)
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_xstats_reset(uint16_t port_id)
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) __rte_warn_unused_result
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
rte_vlan_type
Definition: rte_ethdev.h:476
@ RTE_ETH_VLAN_TYPE_OUTER
Definition: rte_ethdev.h:479
@ RTE_ETH_VLAN_TYPE_INNER
Definition: rte_ethdev.h:478
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:2118
rte_eth_hash_function
Definition: rte_ethdev.h:494
@ RTE_ETH_HASH_FUNCTION_DEFAULT
Definition: rte_ethdev.h:496
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT
Definition: rte_ethdev.h:511
@ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR
Definition: rte_ethdev.h:498
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ
Definition: rte_ethdev.h:504
@ RTE_ETH_HASH_FUNCTION_TOEPLITZ
Definition: rte_ethdev.h:497
uint16_t rte_eth_dev_count_total(void)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num) __rte_warn_unused_result
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1965
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_stats_reset(uint16_t port_id)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6426
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
rte_eth_nb_tcs
Definition: rte_ethdev.h:944
@ RTE_ETH_4_TCS
Definition: rte_ethdev.h:945
@ RTE_ETH_8_TCS
Definition: rte_ethdev.h:946
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
@ RTE_MBUF_HISTORY_OP_TX_BUSY
@ RTE_MBUF_HISTORY_OP_RX
@ RTE_MBUF_HISTORY_OP_TX
@ RTE_MBUF_HISTORY_OP_TX_PREP
static void rte_mbuf_history_mark_bulk(struct rte_mbuf *const *mbufs, unsigned int count, enum rte_mbuf_history_op op)
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1961
uint8_t rsvd_mode_params[4]
Definition: rte_ethdev.h:6137
enum rte_eth_cman_obj obj
Definition: rte_ethdev.h:6105
struct rte_cman_red_params red
Definition: rte_ethdev.h:6130
uint8_t rsvd_obj_params[4]
Definition: rte_ethdev.h:6122
enum rte_cman_mode mode
Definition: rte_ethdev.h:6107
uint8_t rsvd[8]
Definition: rte_ethdev.h:6094
uint64_t modes_supported
Definition: rte_ethdev.h:6084
uint64_t objs_supported
Definition: rte_ethdev.h:6089
struct rte_eth_intr_conf intr_conf
Definition: rte_ethdev.h:1577
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1564
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1551
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1550
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1560
struct rte_eth_conf::@165 rx_adv_conf
uint32_t lpbk_mode
Definition: rte_ethdev.h:1552
union rte_eth_conf::@166 tx_adv_conf
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1576
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1568
uint32_t link_speeds
Definition: rte_ethdev.h:1543
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1558
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1570
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1562
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1572
uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]
Definition: rte_ethdev.h:2026
uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:2025
struct rte_eth_dcb_tc_queue_mapping tc_queue
Definition: rte_ethdev.h:2028
struct rte_eth_dcb_tc_queue_mapping::@167 tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
struct rte_eth_dcb_tc_queue_mapping::@168 tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1390
uint16_t nb_seg_max
Definition: rte_ethdev.h:1377
uint16_t nb_align
Definition: rte_ethdev.h:1367
uint32_t max_rx_bufsize
Definition: rte_ethdev.h:1822
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1830
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1853
unsigned int if_index
Definition: rte_ethdev.h:1809
uint16_t max_rx_queues
Definition: rte_ethdev.h:1826
uint64_t dev_capa
Definition: rte_ethdev.h:1871
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1851
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1815
uint16_t max_tx_queues
Definition: rte_ethdev.h:1827
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1849
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1832
struct rte_device * device
Definition: rte_ethdev.h:1807
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1848
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1858
enum rte_eth_err_handle_mode err_handle_mode
Definition: rte_ethdev.h:1878
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1823
uint16_t max_mtu
Definition: rte_ethdev.h:1812
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1825
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1850
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1881
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1880
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1841
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1852
uint16_t min_mtu
Definition: rte_ethdev.h:1811
uint16_t reta_size
Definition: rte_ethdev.h:1843
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1854
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1847
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1865
uint16_t max_vfs
Definition: rte_ethdev.h:1831
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1869
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1837
const char * driver_name
Definition: rte_ethdev.h:1808
uint8_t hash_key_size
Definition: rte_ethdev.h:1844
uint32_t speed_capa
Definition: rte_ethdev.h:1855
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1867
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1876
struct rte_eth_rxseg_capa rx_seg_capa
Definition: rte_ethdev.h:1833
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1839
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1835
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1857
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1828
const uint32_t * dev_flags
Definition: rte_ethdev.h:1813
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3922
enum rte_eth_event_ipsec_subtype subtype
Definition: rte_ethdev.h:4177
enum rte_eth_event_macsec_type type
Definition: rte_ethdev.h:4117
enum rte_eth_event_macsec_subtype subtype
Definition: rte_ethdev.h:4119
uint32_t low_water
Definition: rte_ethdev.h:1410
uint16_t send_xon
Definition: rte_ethdev.h:1412
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1413
uint32_t high_water
Definition: rte_ethdev.h:1409
uint16_t pause_time
Definition: rte_ethdev.h:1411
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1414
uint16_t max_nb_queues
Definition: rte_ethdev.h:1262
struct rte_eth_hairpin_queue_cap tx_cap
Definition: rte_ethdev.h:1269
struct rte_eth_hairpin_queue_cap rx_cap
Definition: rte_ethdev.h:1268
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1329
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:1424
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1461
enum rte_eth_fc_mode mode_capa
Definition: rte_ethdev.h:1440
struct rte_mempool * mp
Definition: rte_ethdev.h:1930
struct rte_mbuf ** mbuf_ring
Definition: rte_ethdev.h:1929
struct rte_eth_representor_range ranges[]
Definition: rte_ethdev.h:5778
enum rte_eth_representor_type type
Definition: rte_ethdev.h:5754
char name[RTE_DEV_NAME_MAX_LEN]
Definition: rte_ethdev.h:5764
uint8_t * rss_key
Definition: rte_ethdev.h:535
uint8_t rss_key_len
Definition: rte_ethdev.h:536
enum rte_eth_hash_function algorithm
Definition: rte_ethdev.h:542
uint64_t rss_hf
Definition: rte_ethdev.h:541
uint16_t reta[RTE_ETH_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:937
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:1157
uint64_t offloads
Definition: rte_ethdev.h:1175
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1208
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1207
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1160
uint16_t share_group
Definition: rte_ethdev.h:1168
uint8_t rx_drop_en
Definition: rte_ethdev.h:1159
uint16_t share_qid
Definition: rte_ethdev.h:1169
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1183
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1204
uint16_t rx_nseg
Definition: rte_ethdev.h:1161
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1158
uint32_t mtu
Definition: rte_ethdev.h:458
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:460
uint64_t offloads
Definition: rte_ethdev.h:466
void * reserved_ptrs[2]
Definition: rte_ethdev.h:469
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:468
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:457
uint8_t scattered_rx
Definition: rte_ethdev.h:1897
uint8_t queue_state
Definition: rte_ethdev.h:1898
uint8_t avail_thresh
Definition: rte_ethdev.h:1907
uint16_t nb_desc
Definition: rte_ethdev.h:1899
uint16_t rx_buf_size
Definition: rte_ethdev.h:1900
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1758
uint32_t offset_allowed
Definition: rte_ethdev.h:1759
uint32_t offset_align_log2
Definition: rte_ethdev.h:1760
struct rte_mempool * mp
Definition: rte_ethdev.h:1124
uint64_t imissed
Definition: rte_ethdev.h:271
uint64_t obytes
Definition: rte_ethdev.h:266
uint64_t opackets
Definition: rte_ethdev.h:264
uint64_t rx_nombuf
Definition: rte_ethdev.h:274
uint64_t ibytes
Definition: rte_ethdev.h:265
uint64_t ierrors
Definition: rte_ethdev.h:272
uint64_t ipackets
Definition: rte_ethdev.h:263
uint64_t oerrors
Definition: rte_ethdev.h:273
const char * name
Definition: rte_ethdev.h:1732
uint8_t hthresh
Definition: rte_ethdev.h:403
uint8_t pthresh
Definition: rte_ethdev.h:402
uint8_t wthresh
Definition: rte_ethdev.h:404
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1220
uint64_t offloads
Definition: rte_ethdev.h:1226
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1229
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1228
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:1215
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1216
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1217
uint64_t offloads
Definition: rte_ethdev.h:1048
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:1057
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1060
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:1053
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1059
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:1055
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:1042
uint8_t queue_state
Definition: rte_ethdev.h:1917
uint16_t nb_desc
Definition: rte_ethdev.h:1916
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:995
uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:1004
struct rte_eth_vmdq_dcb_conf::@161 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t enable_default_pool
Definition: rte_ethdev.h:996
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:1026
uint8_t enable_default_pool
Definition: rte_ethdev.h:1027
struct rte_eth_vmdq_rx_conf::@162 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
char name[RTE_ETH_XSTATS_NAME_SIZE]
Definition: rte_ethdev.h:1996
uint64_t value
Definition: rte_ethdev.h:1978
uint64_t id
Definition: rte_ethdev.h:1977