@@ -400,9 +400,16 @@ static inline int efa_data_path_direct_post_send(
 		return err;
 	}
 
+	/* We are starting fresh after a db ring, so we need a barrier */
+	if (!sq->num_wqe_pending)
+		mmio_wc_start();
+
 	/* when reaching the sq max_batch, ring the db */
-	if (sq->num_wqe_pending == sq->wq.max_batch)
+	if (sq->num_wqe_pending == sq->wq.max_batch) {
 		efa_data_path_direct_send_wr_ring_db(sq);
+		/* we will prepare more WQEs after the db ring, so we need a barrier */
+		mmio_wc_start();
+	}
 
 	/* Set work request ID */
 	qp->ibv_qp_ex->wr_id = wr_id;
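
Every hunk in this commit applies the same discipline: arm a write-combining barrier before building the first WQE of a batch, and re-arm it whenever a mid-stream doorbell ring closes a batch. The following is a minimal sketch of that control flow, not the provider code itself: `fake_sq`, `wc_barrier()`, and `ring_db()` are illustrative stand-ins for the real `sq`, rdma-core's `mmio_wc_start()`, and `efa_data_path_direct_send_wr_ring_db()`.

```c
/* Sketch of the doorbell-batching discipline; names are stand-ins. */
#include <stdint.h>
#include <stdio.h>

struct fake_sq {
	uint32_t num_wqe_pending; /* WQEs built since the last doorbell */
	uint32_t max_batch;       /* doorbell batching limit */
};

/* Stand-ins for mmio_wc_start() and the EFA doorbell helper. */
static void wc_barrier(void) { printf("barrier\n"); }

static void ring_db(struct fake_sq *sq)
{
	printf("doorbell (%u wqes)\n", sq->num_wqe_pending);
	sq->num_wqe_pending = 0;
}

static void post_one(struct fake_sq *sq)
{
	/* Fresh batch after a db ring: order the WC writes that follow. */
	if (!sq->num_wqe_pending)
		wc_barrier();

	/* Batch full: flush it, then re-arm the barrier for the next WQE. */
	if (sq->num_wqe_pending == sq->max_batch) {
		ring_db(sq);
		wc_barrier();
	}

	/* ... build and copy the WQE into the WC-mapped queue here ... */
	sq->num_wqe_pending++;
}

int main(void)
{
	struct fake_sq sq = { .num_wqe_pending = 0, .max_batch = 4 };

	for (int i = 0; i < 10; i++)
		post_one(&sq);
	if (sq.num_wqe_pending) /* final flush when submission ends */
		ring_db(&sq);
	return 0;
}
```

The re-armed barrier matters because the doorbell ring ends the ordered region the previous `mmio_wc_start()` opened; any WQE bytes written to the write-combining mapping afterwards need a fresh barrier ahead of the next doorbell.
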
@@ -481,12 +488,22 @@ static inline int efa_data_path_direct_post_read(
 	/* Validate SGE count for RDMA operations */
 	if (OFI_UNLIKELY(sge_count > EFA_IO_TX_DESC_NUM_RDMA_BUFS)) {
 		EFA_WARN(FI_LOG_EP_DATA, "EFA device doesn't support > %d iov for rdma operations\n", EFA_IO_TX_DESC_NUM_RDMA_BUFS);
+		/* ring db for earlier wqes if there are any */
+		if (sq->num_wqe_pending)
+			efa_data_path_direct_send_wr_ring_db(sq);
 		return EINVAL;
 	}
 
+	/* We are starting fresh after a db ring, so we need a barrier */
+	if (!sq->num_wqe_pending)
+		mmio_wc_start();
+
 	/* when reaching the sq max_batch, ring the db */
-	if (sq->num_wqe_pending == sq->wq.max_batch)
+	if (sq->num_wqe_pending == sq->wq.max_batch) {
 		efa_data_path_direct_send_wr_ring_db(sq);
+		/* we will prepare more WQEs after the db ring, so we need a barrier */
+		mmio_wc_start();
+	}
 
 	/* Set work request ID */
 	qp->ibv_qp_ex->wr_id = wr_id;
@@ -552,6 +569,16 @@ efa_data_path_direct_post_write(
 	struct efa_io_remote_mem_addr *remote_mem = &local_wqe.data.rdma_req.remote_mem;
 	int err;
 
+	/* Validate SGE count for RDMA operations */
+	if (OFI_UNLIKELY(sge_count > EFA_IO_TX_DESC_NUM_RDMA_BUFS)) {
+		EFA_WARN(FI_LOG_EP_DATA, "EFA device doesn't support > %d iov for rdma operations\n", EFA_IO_TX_DESC_NUM_RDMA_BUFS);
+		/* ring db for earlier wqes if there are any */
+		if (sq->num_wqe_pending) {
+			efa_data_path_direct_send_wr_ring_db(sq);
+		}
+		return EINVAL;
+	}
+
 	/* Validate queue space */
 	err = efa_post_send_validate(qp);
 	if (OFI_UNLIKELY(err)) {
@@ -564,12 +591,22 @@ efa_data_path_direct_post_write(
 	/* Validate SGE count for RDMA operations */
 	if (OFI_UNLIKELY(sge_count > EFA_IO_TX_DESC_NUM_RDMA_BUFS)) {
 		EFA_WARN(FI_LOG_EP_DATA, "EFA device doesn't support > %d iov for rdma operations\n", EFA_IO_TX_DESC_NUM_RDMA_BUFS);
+		/* ring db for earlier wqes if there are any */
+		if (sq->num_wqe_pending)
+			efa_data_path_direct_send_wr_ring_db(sq);
 		return EINVAL;
 	}
 
+	/* We are starting fresh after a db ring, so we need a barrier */
+	if (!sq->num_wqe_pending)
+		mmio_wc_start();
+
 	/* when reaching the sq max_batch, ring the db */
-	if (sq->num_wqe_pending == sq->wq.max_batch)
+	if (sq->num_wqe_pending == sq->wq.max_batch) {
 		efa_data_path_direct_send_wr_ring_db(sq);
+		/* we will prepare more WQEs after the db ring, so we need a barrier */
+		mmio_wc_start();
+	}
 
 	/* Set work request ID */
 	qp->ibv_qp_ex->wr_id = wr_id;
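
The EINVAL paths added above share a second rule: never return with earlier WQEs stranded behind an un-rung doorbell, since a caller may stop submitting after the error. Here is a sketch of that error path under the same assumptions as before; `fake_sq`, `ring_db()`, `post_rdma()`, and `MAX_RDMA_SGES` are hypothetical stand-ins, not the provider's API.

```c
/* Sketch of the error-path doorbell flush; names are illustrative. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_RDMA_SGES 2 /* stand-in for EFA_IO_TX_DESC_NUM_RDMA_BUFS */

struct fake_sq {
	uint32_t num_wqe_pending;
};

static void ring_db(struct fake_sq *sq)
{
	/* ... MMIO doorbell write goes here ... */
	sq->num_wqe_pending = 0;
}

static int post_rdma(struct fake_sq *sq, size_t sge_count)
{
	if (sge_count > MAX_RDMA_SGES) {
		/* Flush earlier WQEs before bailing out, so a caller
		 * that stops submitting on error still gets them run. */
		if (sq->num_wqe_pending)
			ring_db(sq);
		return EINVAL;
	}

	/* ... barrier / batching / WQE build as in the hunks above ... */
	sq->num_wqe_pending++;
	return 0;
}
```
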