net/mlx5: move auxiliary data inline
author Bing Zhao <bingz@nvidia.com>
Mon, 17 Nov 2025 07:27:52 +0000 (09:27 +0200)
committer Raslan Darawsheh <rasland@nvidia.com>
Tue, 18 Nov 2025 13:19:43 +0000 (14:19 +0100)
Since the auxiliary structure is associated with each rule, it can be
allocated within the same ipool allocation to save the extra overhead
of the *alloc header and the unneeded CPU cycles.

Fixes: 27d171b88031 ("net/mlx5: abstract flow action and enable reconfigure")
Cc: stable@dpdk.org
Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
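
As a rough illustration of that pattern (hypothetical rule/rule_aux types, with
plain calloc standing in for the driver's ipool), the auxiliary structure is
simply carved out of the tail of the rule's own allocation:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the per-rule objects. */
struct rule_aux { uint64_t counters[4]; };
struct rule     { uint32_t id; struct rule_aux *aux; };

/* One allocation covers both objects; aux lives right behind the rule. */
static struct rule *rule_create(uint32_t id)
{
	struct rule *r = calloc(1, sizeof(*r) + sizeof(struct rule_aux));

	if (r == NULL)
		return NULL;
	r->id = id;
	r->aux = (struct rule_aux *)((uintptr_t)r + sizeof(*r));
	return r;
}

/* A single free releases the rule together with its auxiliary data. */
static void rule_destroy(struct rule *r)
{
	free(r);
}

int main(void)
{
	struct rule *r = rule_create(1);

	if (r != NULL) {
		r->aux->counters[0] = 42;
		rule_destroy(r);
	}
	return 0;
}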
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/mlx5_flow_hw.c

index e642e2664e89032ec41c19b640393e9ea4b668bb..7f73183bb14b43d90ae6ccaecdf2269bf256fe2d 100644 (file)
@@ -1643,16 +1643,17 @@ err_secondary:
        /* Read link status in case it is up and there will be no event. */
        mlx5_link_update(eth_dev, 0);
        /* Watch LSC interrupts between port probe and port start. */
-       priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
-                                                       eth_dev->data->port_id;
+       priv->sh->port[priv->dev_port - 1].nl_ih_port_id = eth_dev->data->port_id;
        mlx5_set_link_up(eth_dev);
        for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
                icfg[i].release_mem_en = !!sh->config.reclaim_mode;
                if (sh->config.reclaim_mode)
                        icfg[i].per_core_cache = 0;
 #ifdef HAVE_MLX5_HWS_SUPPORT
-               if (priv->sh->config.dv_flow_en == 2)
+               if (priv->sh->config.dv_flow_en == 2) {
                        icfg[i].size = sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws);
+                       icfg[i].size += sizeof(struct rte_flow_hw_aux);
+               }
 #endif
                priv->flows[i] = mlx5_ipool_create(&icfg[i]);
                if (!priv->flows[i])
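
The larger ipool element above reserves room for all three structures laid out
back to back (rte_flow_hw, then rte_flow_nt2hws, then rte_flow_hw_aux), which
is what lets flow_hw_prepare() below locate the aux area by plain pointer
arithmetic instead of a second allocation.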
index f8995b53cc23d76ed92db00e56501423c50e9ec5..c41b99746ffa3f8f350ffaae3cacea80547d70ca 100644 (file)
@@ -13500,8 +13500,9 @@ static int flow_hw_prepare(struct rte_eth_dev *dev,
        (*flow)->nt2hws = (struct rte_flow_nt2hws *)
                                ((uintptr_t)(*flow) + sizeof(struct rte_flow_hw));
        (*flow)->idx = idx;
-       (*flow)->nt2hws->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux),
-                                   RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
+       (*flow)->nt2hws->flow_aux = (struct rte_flow_hw_aux *)
+               ((uintptr_t)((*flow)->nt2hws) + sizeof(struct rte_flow_nt2hws));
+
        if (!(*flow)->nt2hws->flow_aux)
                return rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -14152,10 +14153,8 @@ flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow)
          * Notice matcher destroy will take place when matcher's list is destroyed
          * , same as for DV.
          */
-       if (flow->nt2hws->flow_aux) {
-               mlx5_free(flow->nt2hws->flow_aux);
+       if (flow->nt2hws->flow_aux)
                flow->nt2hws->flow_aux = NULL;
-       }
        if (flow->nt2hws->rix_encap_decap) {
                flow_encap_decap_resource_release(dev, flow->nt2hws->rix_encap_decap);
                flow->nt2hws->rix_encap_decap = 0;
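
With the auxiliary data placed inline, the destroy path above no longer frees
it separately; clearing flow_aux is enough, and the memory is returned together
with the flow when the ipool entry itself is released.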