DPDK  25.11.0
rte_eventdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_compat.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS RTE_BIT32(0)
#define RTE_EVENT_DEV_CAP_EVENT_QOS RTE_BIT32(1)
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED RTE_BIT32(2)
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES RTE_BIT32(3)
#define RTE_EVENT_DEV_CAP_BURST_MODE RTE_BIT32(4)
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE RTE_BIT32(5)
#define RTE_EVENT_DEV_CAP_NONSEQ_MODE RTE_BIT32(6)
#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK RTE_BIT32(7)
#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT RTE_BIT32(8)
#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID RTE_BIT32(9)
#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE RTE_BIT32(10)
#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR RTE_BIT32(11)
#define RTE_EVENT_DEV_CAP_PROFILE_LINK RTE_BIT32(12)
#define RTE_EVENT_DEV_CAP_ATOMIC RTE_BIT32(13)
#define RTE_EVENT_DEV_CAP_ORDERED RTE_BIT32(14)
#define RTE_EVENT_DEV_CAP_PARALLEL RTE_BIT32(15)
#define RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ RTE_BIT32(16)
#define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE RTE_BIT32(17)
#define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE RTE_BIT32(18)
#define RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE RTE_BIT32(19)
#define RTE_EVENT_DEV_CAP_PRESCHEDULE_EXPLICIT RTE_BIT32(20)

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

int
rte_event_dev_socket_id(uint8_t dev_id);

struct rte_event_dev_info {
	const char *driver_name;	/**< Event driver name. */
	struct rte_device *dev;		/**< Device information. */
	uint32_t min_dequeue_timeout_ns;
	uint32_t max_dequeue_timeout_ns;
	uint32_t dequeue_timeout_ns;
	uint8_t max_event_queues;
	uint32_t max_event_queue_flows;
	uint8_t max_event_queue_priority_levels;
	uint8_t max_event_priority_levels;
	uint8_t max_event_ports;
	uint8_t max_event_port_dequeue_depth;
	uint32_t max_event_port_enqueue_depth;
	uint8_t max_event_port_links;
	int32_t max_num_events;
	uint32_t event_dev_cap;
	uint8_t max_single_link_event_port_queue_pairs;
	uint8_t max_profiles_per_port;
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

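/*
 * Example (illustrative sketch; the device id and the capability tested are
 * assumptions): query device capabilities before configuring the device.
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(0, &info) == 0 &&
 *	    (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE))
 *		printf("%s supports burst mode\n", info.driver_name);
 */
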
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
#define RTE_EVENT_DEV_ATTR_STARTED 2

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT RTE_BIT32(0)

enum rte_event_dev_preschedule_type {
	RTE_EVENT_PRESCHEDULE_NONE,
	RTE_EVENT_PRESCHEDULE,
	RTE_EVENT_PRESCHEDULE_ADAPTIVE,
};

struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	int32_t nb_events_limit;
	uint8_t nb_event_queues;
	uint8_t nb_event_ports;
	uint32_t nb_event_queue_flows;
	uint32_t nb_event_port_dequeue_depth;
	uint32_t nb_event_port_enqueue_depth;
	uint32_t event_dev_cfg;
	uint8_t nb_single_link_event_port_queues;
	enum rte_event_dev_preschedule_type preschedule_type;
};

int
rte_event_dev_configure(uint8_t dev_id,
		const struct rte_event_dev_config *dev_conf);

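/*
 * Example (illustrative sketch; the device id and the queue/port counts are
 * assumptions): derive a minimal configuration from the advertised limits.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(0, &info);
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	if (rte_event_dev_configure(0, &cfg) < 0)
 *		rte_exit(EXIT_FAILURE, "eventdev configure failed\n");
 */
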
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES RTE_BIT32(0)
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK RTE_BIT32(1)

struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	uint32_t nb_atomic_order_sequences;
	uint32_t event_queue_cfg;
	uint8_t schedule_type;
	uint8_t priority;
	uint8_t weight;
	uint8_t affinity;
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
		struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		const struct rte_event_queue_conf *queue_conf);

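/*
 * Example (illustrative sketch; the device/queue ids are assumptions):
 * set up an atomic queue starting from the driver defaults.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(0, 0, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	rte_event_queue_setup(0, 0, &qconf);
 */
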
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
		uint64_t attr_value);

/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL RTE_BIT32(0)
#define RTE_EVENT_PORT_CFG_SINGLE_LINK RTE_BIT32(1)
#define RTE_EVENT_PORT_CFG_HINT_PRODUCER RTE_BIT32(2)
#define RTE_EVENT_PORT_CFG_HINT_CONSUMER RTE_BIT32(3)
#define RTE_EVENT_PORT_CFG_HINT_WORKER RTE_BIT32(4)
#define RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ RTE_BIT32(5)

struct rte_event_port_conf {
	int32_t new_event_threshold;
	uint16_t dequeue_depth;
	uint16_t enqueue_depth;
	uint32_t event_port_cfg;
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
		struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		const struct rte_event_port_conf *port_conf);

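/*
 * Example (illustrative sketch; the device/port ids are assumptions):
 * set up a worker port starting from the driver defaults.
 *
 *	struct rte_event_port_conf pconf;
 *
 *	rte_event_port_default_conf_get(0, 0, &pconf);
 *	pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;
 *	rte_event_port_setup(0, 0, &pconf);
 */
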
typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
		rte_eventdev_port_flush_t release_cb, void *args);

#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
#define RTE_EVENT_PORT_ATTR_INDEPENDENT_ENQ 4

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
		uint32_t *attr_value);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
		struct rte_event event, void *arg);

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
		rte_eventdev_stop_flush_t callback, void *userdata);

int
rte_event_dev_close(uint8_t dev_id);

struct __rte_aligned(16) rte_event_vector {
	uint16_t nb_elem;
	uint16_t elem_offset : 12;
	uint16_t rsvd : 3;
	uint16_t attr_valid : 1;
	union {
		/* Used by the Rx/Tx adapters.
		 * Indicates that all the elements in this vector belong to
		 * the same port and queue pair when originating from the Rx
		 * adapter; valid only when the event type is ETHDEV_VECTOR
		 * or ETH_RX_ADAPTER_VECTOR.
		 * Can also be used to indicate to the Tx adapter the
		 * destination port and queue of the mbufs in the vector.
		 */
		struct {
			uint16_t port;
			uint16_t queue;
		};
	};
	uint64_t impl_opaque;

/* Empty structures do not have zero size in C++, leading to compilation
 * errors with clang about the structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the union wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
	union __rte_aligned(16) {
#endif
		struct rte_mbuf *mbufs[0];
		void *ptrs[0];
		uint64_t u64s[0];
#ifndef __cplusplus
	};
#endif
};

/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0
#define RTE_SCHED_TYPE_ATOMIC 1
#define RTE_SCHED_TYPE_PARALLEL 2

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
#define RTE_EVENT_TYPE_TIMER 0x2
#define RTE_EVENT_TYPE_CPU 0x3
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
#define RTE_EVENT_TYPE_DMADEV 0x5
#define RTE_EVENT_TYPE_VECTOR 0x8
#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
	(RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0
#define RTE_EVENT_OP_FORWARD 1
#define RTE_EVENT_OP_RELEASE 2

struct rte_event {
	/* WORD0 */
	union {
		uint64_t event;
		struct {
			uint32_t flow_id:20;
			uint32_t sub_event_type:8;
			uint32_t event_type:4;
			uint8_t op:2;
			uint8_t rsvd:4;
			uint8_t sched_type:2;
			uint8_t queue_id;
			uint8_t priority;
			uint8_t impl_opaque;
		};
	};
	/* WORD1 */
	union {
		uint64_t u64;
		void *event_ptr;
		struct rte_mbuf *mbuf;
		struct rte_event_vector *vec;
	};
};

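/*
 * Example (illustrative sketch; the ids and the mbuf variable "m" are
 * assumptions): initialize a new event carrying an mbuf.
 *
 *	struct rte_event ev = {0};
 *
 *	ev.op = RTE_EVENT_OP_NEW;
 *	ev.event_type = RTE_EVENT_TYPE_CPU;
 *	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *	ev.queue_id = 0;
 *	ev.flow_id = 1;
 *	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
 *	ev.mbuf = m;
 */
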
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

/* Timer adapter capability bitmap flags */
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT RTE_BIT32(0)
#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC RTE_BIT32(1)

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flags */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
		uint32_t *caps);

/* DMA adapter capability bitmap flags */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4

__rte_experimental
int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
		uint32_t *caps);

/* Vector adapter capability bitmap flags */
#define RTE_EVENT_VECTOR_ADAPTER_CAP_INTERNAL_PORT 0x1

__rte_experimental
int
rte_event_vector_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
		uint64_t *timeout_ticks);

int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		const uint8_t queues[], const uint8_t priorities[],
		uint16_t nb_links);

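/*
 * Example (illustrative sketch; the ids are assumptions): link one port to
 * two queues, passing NULL to request normal priority for every link.
 *
 *	uint8_t queues[] = {0, 1};
 *
 *	if (rte_event_port_link(0, 0, queues, NULL, 2) != 2)
 *		rte_exit(EXIT_FAILURE, "port link failed\n");
 */
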
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint16_t nb_unlinks);

__rte_experimental
int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);

__rte_experimental
int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint16_t nb_unlinks, uint8_t profile_id);

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
		uint8_t queues[], uint8_t priorities[]);

__rte_experimental
int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
		uint8_t priorities[], uint8_t profile_id);

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE,
	RTE_EVENT_DEV_XSTATS_PORT,
	RTE_EVENT_DEV_XSTATS_QUEUE,
};

struct rte_event_dev_xstats_name {
	char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		uint64_t *ids,
		unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id,
		const uint64_t ids[],
		uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		uint64_t *id);

int
rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint64_t ids[],
		uint32_t nb_ids);

int rte_event_dev_selftest(uint8_t dev_id);

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
		unsigned int cache_size, uint16_t nb_elem,
		int socket_id);

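/*
 * Example (illustrative sketch; the pool name and sizes are assumptions):
 * create a pool of event vectors, each able to hold 32 pointers.
 *
 *	struct rte_mempool *vpool;
 *
 *	vpool = rte_event_vector_pool_create("ev_vec_pool", 16 * 1024, 64,
 *			32, rte_socket_id());
 *	if (vpool == NULL)
 *		rte_exit(EXIT_FAILURE, "vector pool creation failed\n");
 */
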
#include <rte_eventdev_core.h>

#ifdef __cplusplus
extern "C" {
#endif

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events,
		const event_enqueue_burst_t fn)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	/* Look up the fast-path ops and port data for this device. */
	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);

	return fn(port, ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_burst);
}

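/*
 * Example (illustrative sketch; the ids and the "ev"/"n" variables are
 * assumptions): enqueue a burst and retry any events the device could not
 * accept in the first call.
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < n)
 *		sent += rte_event_enqueue_burst(0, 0, &ev[sent], n - sent);
 */
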
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
		const struct rte_event ev[], uint16_t nb_events)
{
	const struct rte_event_fp_ops *fp_ops;

	fp_ops = &rte_event_fp_ops[dev_id];
	return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
			fp_ops->enqueue_forward_burst);
}

static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
		rte_errno = EINVAL;
		return 0;
	}

	if (port == NULL) {
		rte_errno = EINVAL;
		return 0;
	}
#endif
	rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);

	return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
}

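/*
 * Example (illustrative sketch; the ids, "done" flag, "timeout" and the
 * process() helper are assumptions): a typical worker loop that dequeues a
 * burst, processes it, and forwards the events to the next stage.
 *
 *	struct rte_event ev[32];
 *	uint16_t i, nb;
 *
 *	while (!done) {
 *		nb = rte_event_dequeue_burst(0, port_id, ev, 32, timeout);
 *		for (i = 0; i < nb; i++) {
 *			process(&ev[i]);
 *			ev[i].op = RTE_EVENT_OP_FORWARD;
 *		}
 *		rte_event_enqueue_burst(0, port_id, ev, nb);
 *	}
 */
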
#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)

static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
		return -EINVAL;
#endif
	rte_eventdev_trace_maintain(dev_id, port_id, op);

	/* Maintenance is a no-op on RTE_EVENT_DEV_CAP_MAINTENANCE_FREE devices. */
	if (fp_ops->maintain != NULL)
		fp_ops->maintain(port, op);

	return 0;
}

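/*
 * Example (illustrative sketch; the ids and "nb" are assumptions): on
 * devices without RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, keep the port live
 * while the worker is idle.
 *
 *	if (nb == 0)
 *		rte_event_maintain(0, port_id, 0);
 */
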
static inline uint8_t
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS ||
	    port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;

	if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

	return fp_ops->profile_switch(port, profile_id);
}

__rte_experimental
static inline int
rte_event_port_preschedule_modify(uint8_t dev_id, uint8_t port_id,
		enum rte_event_dev_preschedule_type type)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return -EINVAL;

	if (port == NULL)
		return -EINVAL;
#endif
	rte_eventdev_trace_port_preschedule_modify(dev_id, port_id, type);

	return fp_ops->preschedule_modify(port, type);
}

__rte_experimental
static inline void
rte_event_port_preschedule(uint8_t dev_id, uint8_t port_id,
		enum rte_event_dev_preschedule_type type)
{
	const struct rte_event_fp_ops *fp_ops;
	void *port;

	fp_ops = &rte_event_fp_ops[dev_id];
	port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
	if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
		return;
	if (port == NULL)
		return;
#endif
	rte_eventdev_trace_port_preschedule(dev_id, port_id, type);

	fp_ops->preschedule(port, type);
}
#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */