@@ -166,7 +166,7 @@ static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
 
 	linear_rq_headroom += NET_IP_ALIGN;
 
-	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
+	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
 		return linear_rq_headroom;
 
 	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
@@ -205,7 +205,7 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
 		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
 		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
-		MLX5_WQ_TYPE_LINKED_LIST;
+		MLX5_WQ_TYPE_CYCLIC;
 }
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -325,7 +325,7 @@ static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 	default:
-		return mlx5_wq_ll_get_size(&rq->wqe.wq);
+		return mlx5_wq_cyc_get_size(&rq->wqe.wq);
 	}
 }
 
@@ -491,15 +491,15 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		if (err)
 			goto err_destroy_umr_mkey;
 		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
-					&rq->wq_ctrl);
+	default: /* MLX5_WQ_TYPE_CYCLIC */
+		err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
+					 &rq->wq_ctrl);
 		if (err)
 			return err;
 
 		rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
 
-		wq_sz = mlx5_wq_ll_get_size(&rq->wqe.wq);
+		wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
 
 		rq->wqe.frag_info =
 			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
@@ -568,19 +568,19 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
 	for (i = 0; i < wq_sz; i++) {
 		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
-			struct mlx5e_rx_wqe *wqe =
+			struct mlx5e_rx_wqe_ll *wqe =
 				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
 			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
 
-			wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
-			wqe->data.byte_count = cpu_to_be32(byte_count);
-			wqe->data.lkey = rq->mkey_be;
+			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
+			wqe->data[0].byte_count = cpu_to_be32(byte_count);
+			wqe->data[0].lkey = rq->mkey_be;
 		} else {
-			struct mlx5e_rx_wqe *wqe =
-				mlx5_wq_ll_get_wqe(&rq->wqe.wq, i);
+			struct mlx5e_rx_wqe_cyc *wqe =
+				mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
 
-			wqe->data.byte_count = cpu_to_be32(byte_count);
-			wqe->data.lkey = rq->mkey_be;
+			wqe->data[0].byte_count = cpu_to_be32(byte_count);
+			wqe->data[0].lkey = rq->mkey_be;
 		}
 	}
 
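The wqe->data.addr to wqe->data[0].addr change above suggests the new WQE types (mlx5e_rx_wqe_ll, mlx5e_rx_wqe_cyc) end in an array of data segments rather than a single embedded one, with the WQ stride deciding how many segments sit behind each slot. A toy illustration of that trailing-array pattern, using hypothetical struct names and fields rather than the driver's actual definitions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical data segment, standing in for mlx5_wqe_data_seg. */
struct toy_data_seg {
	uint32_t byte_count;
	uint32_t lkey;
	uint64_t addr;
};

/* New style of WQE: a small fixed part followed by a flexible array;
 * the WQ stride (set at RQ creation) decides how many segments fit. */
struct toy_wqe_multi {
	uint32_t fixed_hdr;		/* placeholder for the fixed WQE part */
	struct toy_data_seg data[];	/* ndsegs data segments follow */
};

static void init_first_seg(struct toy_wqe_multi *wqe, uint64_t addr,
			   uint32_t byte_count, uint32_t lkey)
{
	/* With a single fragment per WQE, only data[0] is populated,
	 * mirroring the wqe->data[0].* assignments in the hunk above. */
	wqe->data[0].addr = addr;
	wqe->data[0].byte_count = byte_count;
	wqe->data[0].lkey = lkey;
}

int main(void)
{
	/* One WQE slot with room for a single data segment (ndsegs == 1). */
	struct toy_wqe_multi *wqe =
		calloc(1, sizeof(*wqe) + 1 * sizeof(struct toy_data_seg));

	init_first_seg(wqe, 0x1000, 1500, 0x42);
	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)wqe->data[0].addr, wqe->data[0].byte_count);
	free(wqe);
	return 0;
}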
@@ -630,7 +630,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 		kfree(rq->mpwqe.info);
 		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
 		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+	default: /* MLX5_WQ_TYPE_CYCLIC */
 		kfree(rq->wqe.frag_info);
 	}
 
@@ -801,11 +801,12 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
 		struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 
+		/* UMR WQE (if in progress) is always at wq->head */
 		if (rq->mpwqe.umr_in_progress)
 			mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
 
 		while (!mlx5_wq_ll_is_empty(wq)) {
-			struct mlx5e_rx_wqe *wqe;
+			struct mlx5e_rx_wqe_ll *wqe;
 
 			wqe_ix_be = *wq->tail_next;
 			wqe_ix = be16_to_cpu(wqe_ix_be);
@@ -815,24 +816,19 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 					       &wqe->next.next_wqe_index);
 		}
 	} else {
-		struct mlx5_wq_ll *wq = &rq->wqe.wq;
-
-		while (!mlx5_wq_ll_is_empty(wq)) {
-			struct mlx5e_rx_wqe *wqe;
+		struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 
-			wqe_ix_be = *wq->tail_next;
-			wqe_ix = be16_to_cpu(wqe_ix_be);
-			wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
+		while (!mlx5_wq_cyc_is_empty(wq)) {
+			wqe_ix = mlx5_wq_cyc_get_tail(wq);
 			rq->dealloc_wqe(rq, wqe_ix);
-			mlx5_wq_ll_pop(wq, wqe_ix_be,
-				       &wqe->next.next_wqe_index);
+			mlx5_wq_cyc_pop(wq);
 		}
 
 		/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
 		 * but yet to be re-posted.
 		 */
 		if (rq->wqe.page_reuse) {
-			int wq_sz = mlx5_wq_ll_get_size(wq);
+			int wq_sz = mlx5_wq_cyc_get_size(wq);
 
 			for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
 				rq->dealloc_wqe(rq, wqe_ix);
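The rework of the legacy-RQ drain loop above is the heart of the linked-list-to-cyclic conversion: the old code had to read each WQE's next_wqe_index to find its successor, while a cyclic WQ is addressed purely by producer/consumer counters, so popping is just an increment. A minimal stand-alone sketch of that drain pattern, using toy types rather than the real mlx5_wq_cyc API:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy cyclic queue: entries are addressed by (counter & mask). */
struct toy_wq_cyc {
	uint16_t head;	/* producer counter */
	uint16_t tail;	/* consumer counter */
	uint16_t mask;	/* size - 1, size is a power of two */
};

static bool toy_cyc_is_empty(const struct toy_wq_cyc *wq)
{
	return wq->head == wq->tail;
}

static uint16_t toy_cyc_get_tail(const struct toy_wq_cyc *wq)
{
	return wq->tail & wq->mask;	/* index of the oldest posted entry */
}

static void toy_cyc_pop(struct toy_wq_cyc *wq)
{
	wq->tail++;			/* no per-WQE link to follow */
}

/* Draining mirrors the new else-branch above: the index comes from the
 * consumer counter, and popping is just an increment. */
static void toy_drain(struct toy_wq_cyc *wq, void (*dealloc)(uint16_t ix))
{
	while (!toy_cyc_is_empty(wq)) {
		dealloc(toy_cyc_get_tail(wq));
		toy_cyc_pop(wq);
	}
}

static void toy_dealloc(uint16_t ix)
{
	printf("dealloc wqe %u\n", ix);
}

int main(void)
{
	struct toy_wq_cyc wq = { .head = 3, .tail = 0, .mask = 7 };

	toy_drain(&wq, toy_dealloc);	/* frees indexes 0, 1, 2 */
	return 0;
}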
@@ -1958,13 +1954,29 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 	kfree(c);
 }
 
+static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
+{
+	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
+
+	switch (wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		sz += sizeof(struct mlx5e_rx_wqe_ll);
+		break;
+	default: /* MLX5_WQ_TYPE_CYCLIC */
+		sz += sizeof(struct mlx5e_rx_wqe_cyc);
+	}
+
+	return order_base_2(sz);
+}
+
 static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 				 struct mlx5e_params *params,
 				 struct mlx5e_rq_param *param)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	void *rqc = param->rqc;
 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+	int ndsegs = 1;
 
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -1974,16 +1986,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 		MLX5_SET(wq, wq, log_wqe_stride_size,
 			 mlx5e_mpwqe_get_log_stride_size(mdev, params) -
 			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
-		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
 		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
 		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+	default: /* MLX5_WQ_TYPE_CYCLIC */
 		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
 	}
 
+	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
 	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
-	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+	MLX5_SET(wq, wq, log_wq_stride,
+		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
 	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
 	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
 	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
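The new mlx5e_get_rqwq_log_stride() helper above replaces the old ilog2(sizeof(struct mlx5e_rx_wqe)) stride: the WQE stride is now the fixed part of the WQE for the given WQ type plus ndsegs data segments, rounded up to a power of two (ndsegs is fixed to 1 in mlx5e_build_rq_param()). A small stand-alone sketch of that arithmetic; the 16-byte segment and header sizes are assumptions for illustration, not values taken from this patch:

#include <stdio.h>

/* Assumed sizes, for illustration only:
 * - a data segment (mlx5_wqe_data_seg) is 16 bytes,
 * - the cyclic RX WQE has no fixed header, only data segments,
 * - the linked-list RX WQE carries a 16-byte next-segment header. */
static unsigned int order_base_2_demo(unsigned int sz)
{
	unsigned int log = 0;

	while ((1u << log) < sz)	/* round up to the next power of two */
		log++;
	return log;
}

int main(void)
{
	const unsigned int dseg = 16, ll_hdr = 16, ndsegs = 1;

	/* MLX5_WQ_TYPE_CYCLIC, ndsegs = 1: 16 bytes -> log_wq_stride = 4 */
	printf("cyclic:   %u\n", order_base_2_demo(ndsegs * dseg));
	/* MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, ndsegs = 1: 32 bytes -> 5 */
	printf("striding: %u\n", order_base_2_demo(ll_hdr + ndsegs * dseg));
	return 0;
}

Under those assumed sizes the cyclic case yields a 16-byte stride and the striding-RQ case a 32-byte one, consistent with the single-data-segment WQEs that mlx5e_alloc_rq() initializes earlier in the patch.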
@@ -1999,8 +2011,9 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
 	void *rqc = param->rqc;
 	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
-	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
-	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+	MLX5_SET(wq, wq, log_wq_stride,
+		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
 	MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
 
 	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
@@ -2051,7 +2064,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
 		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
 			mlx5e_mpwqe_get_log_num_strides(mdev, params);
 		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+	default: /* MLX5_WQ_TYPE_CYCLIC */
 		log_cq_size = params->log_rq_mtu_frames;
 	}
 
@@ -2857,8 +2870,8 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
 
 	param->wq.db_numa_node = param->wq.buf_numa_node;
 
-	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
-				&rq->wq_ctrl);
+	err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
+				 &rq->wq_ctrl);
 	if (err)
 		return err;
 
@@ -3360,7 +3373,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
 	new_channels.params = *old_params;
 	new_channels.params.lro_en = enable;
 
-	if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+	if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
 		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
 		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
 			reset = false;
@@ -3566,7 +3579,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	new_channels.params = *params;
 	new_channels.params.sw_mtu = new_mtu;
 
-	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
 		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
 		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
 