/* Read link status in case it is up and there will be no event. */
mlx5_link_update(eth_dev, 0);
/* Watch LSC interrupts between port probe and port start. */
- priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
- eth_dev->data->port_id;
+ priv->sh->port[priv->dev_port - 1].nl_ih_port_id = eth_dev->data->port_id;
mlx5_set_link_up(eth_dev);
for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
icfg[i].release_mem_en = !!sh->config.reclaim_mode;
if (sh->config.reclaim_mode)
icfg[i].per_core_cache = 0;
#ifdef HAVE_MLX5_HWS_SUPPORT
- if (priv->sh->config.dv_flow_en == 2)
+ if (priv->sh->config.dv_flow_en == 2) {
icfg[i].size = sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws);
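+ /* Reserve room for rte_flow_hw_aux so it shares the flow's ipool entry. */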
+ icfg[i].size += sizeof(struct rte_flow_hw_aux);
+ }
#endif
priv->flows[i] = mlx5_ipool_create(&icfg[i]);
if (!priv->flows[i])
(*flow)->nt2hws = (struct rte_flow_nt2hws *)
((uintptr_t)(*flow) + sizeof(struct rte_flow_hw));
(*flow)->idx = idx;
- (*flow)->nt2hws->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux),
- RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
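+ /* flow_aux follows nt2hws inside the same ipool entry (see icfg size setup). */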
+ (*flow)->nt2hws->flow_aux = (struct rte_flow_hw_aux *)
+ ((uintptr_t)((*flow)->nt2hws) + sizeof(struct rte_flow_nt2hws));
+
if (!(*flow)->nt2hws->flow_aux)
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
* Notice matcher destroy will take place when matcher's list is destroyed,
* same as for DV.
*/
- if (flow->nt2hws->flow_aux) {
- mlx5_free(flow->nt2hws->flow_aux);
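+ /* flow_aux lives inside the flow's ipool entry, so no separate free is needed. */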
+ if (flow->nt2hws->flow_aux)
flow->nt2hws->flow_aux = NULL;
- }
if (flow->nt2hws->rix_encap_decap) {
flow_encap_decap_resource_release(dev, flow->nt2hws->rix_encap_decap);
flow->nt2hws->rix_encap_decap = 0;