@@ -156,12 +156,13 @@
 #define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
 
 /* XDP */
-#define AM65_CPSW_XDP_CONSUMED 2
-#define AM65_CPSW_XDP_REDIRECT 1
+#define AM65_CPSW_XDP_CONSUMED BIT(1)
+#define AM65_CPSW_XDP_REDIRECT BIT(0)
 #define AM65_CPSW_XDP_PASS 0
 
 /* Include headroom compatible with both skb and xdpf */
-#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
 
 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
 				      const u8 *dev_addr)
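Two things happen in this hunk. First, the CONSUMED/REDIRECT values do not change numerically (BIT(1) is 2, BIT(0) is 1); the BIT() spelling documents that they are now independent flag bits, which the reworked Rx poll loop below ORs together across packets. Second, the headroom gains a variant rounded up to sizeof(long), presumably so the XDP data area starts long-aligned. A standalone sketch of both points (user-space C; the kernel macros are re-created locally, and the NET_SKB_PAD/NET_IP_ALIGN values are typical assumptions, not guaranteed):

	#include <assert.h>
	#include <stdio.h>

	/* Local stand-ins for the kernel macros, for illustration only */
	#define BIT(n)              (1U << (n))
	#define ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))
	#define max(a, b)           ((a) > (b) ? (a) : (b))
	#define NET_SKB_PAD         64   /* typical: max(32, L1_CACHE_BYTES) */
	#define XDP_PACKET_HEADROOM 256
	#define NET_IP_ALIGN        2    /* 0 on some arches, e.g. x86 */

	#define AM65_CPSW_XDP_CONSUMED BIT(1)   /* frame TX'd or dropped */
	#define AM65_CPSW_XDP_REDIRECT BIT(0)   /* frame handed to xdp_do_redirect() */
	#define AM65_CPSW_XDP_PASS     0        /* fall through to the skb path */

	#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
	#define AM65_CPSW_HEADROOM    ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))

	int main(void)
	{
		/* Distinct bits let a poll loop OR together per-packet
		 * verdicts and test "did any redirect happen?" once. */
		int state_or = AM65_CPSW_XDP_CONSUMED | AM65_CPSW_XDP_REDIRECT;
		assert(state_or & AM65_CPSW_XDP_REDIRECT);

		/* 258 unaligned -> 264 once rounded up to sizeof(long) == 8 */
		printf("headroom: %u -> %zu\n",
		       (unsigned)AM65_CPSW_HEADROOM_NA, (size_t)AM65_CPSW_HEADROOM);
		return 0;
	}

On a 64-bit build with these assumed values the reserved headroom grows from 258 to 264 bytes.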
@@ -933,7 +934,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
 	if (unlikely(!host_desc)) {
 		ndev->stats.tx_dropped++;
-		return -ENOMEM;
+		return AM65_CPSW_XDP_CONSUMED;	/* drop */
 	}
 
 	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
@@ -942,7 +943,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 				 pkt_len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
 		ndev->stats.tx_dropped++;
-		ret = -ENOMEM;
+		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
 		goto pool_free;
 	}
 
@@ -977,6 +978,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 		/* Inform BQL */
 		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
 		ndev->stats.tx_errors++;
+		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
 		goto dma_unmap;
 	}
 
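Returning AM65_CPSW_XDP_CONSUMED instead of -ENOMEM is more than cosmetic: the patch separates the Rx path's two result channels, reserving plain -errno for ring-level failures while XDP verdicts travel as AM65_CPSW_XDP_* values. My reading of the old flow is that an XDP_TX allocation failure here bubbled -ENOMEM up through am65_cpsw_run_xdp() and am65_cpsw_nuss_rx_packets() into the poll loop, which then aborted the whole poll as if the ring itself had failed. The caller-side contract after the change, as a fragment (names from the hunks below; comments are mine):

	err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
				     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
	if (err)		/* always AM65_CPSW_XDP_CONSUMED now; the frame */
		goto drop;	/* was already dropped and accounted inside    */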
@@ -1004,6 +1006,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 	struct bpf_prog *prog;
 	struct page *page;
 	u32 act;
+	int err;
 
 	prog = READ_ONCE(port->xdp_prog);
 	if (!prog)
@@ -1023,22 +1026,22 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 
 		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf))
-			break;
+			goto drop;
 
 		__netif_tx_lock(netif_txq, cpu);
-		ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
 					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
 		__netif_tx_unlock(netif_txq);
-		if (ret)
-			break;
+		if (err)
+			goto drop;
 
 		ndev->stats.rx_bytes += *len;
 		ndev->stats.rx_packets++;
 		ret = AM65_CPSW_XDP_CONSUMED;
 		goto out;
 	case XDP_REDIRECT:
 		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
-			break;
+			goto drop;
 
 		ndev->stats.rx_bytes += *len;
 		ndev->stats.rx_packets++;
@@ -1048,6 +1051,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 		bpf_warn_invalid_xdp_action(ndev, prog, act);
 		fallthrough;
 	case XDP_ABORTED:
+drop:
 		trace_xdp_exception(ndev, prog, act);
 		fallthrough;
 	case XDP_DROP:
@@ -1056,7 +1060,6 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 
 	page = virt_to_head_page(xdp->data);
 	am65_cpsw_put_page(rx_chn, page, true, desc_idx);
-
 out:
 	return ret;
 }
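The drop: label placed on the XDP_ABORTED case lets every failure path jump into the shared tail of the switch (trace the exception, fall through to XDP_DROP, recycle the page) instead of relying on break with a stale ret. A minimal runnable sketch of that label-in-a-case pattern, with hypothetical names standing in for the driver's logic:

	#include <stdio.h>

	enum act { ACT_TX, ACT_REDIRECT, ACT_ABORTED, ACT_DROP };

	static int handle(enum act act, int fail)
	{
		int ret = 0;

		switch (act) {
		case ACT_TX:
			if (fail)
				goto drop;	/* jump into the shared drop tail */
			return 1;		/* consumed */
		case ACT_REDIRECT:
			if (fail)
				goto drop;
			return 2;		/* redirected */
		case ACT_ABORTED:
	drop:
			fprintf(stderr, "exception traced once, here\n");
			/* fallthrough */
		case ACT_DROP:
			ret = 1;		/* consumed: caller recycles the page */
			break;
		}
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", handle(ACT_TX, 1), handle(ACT_REDIRECT, 0));
		return 0;
	}

Jumping forward to a label inside a switch body is legal C; the patch uses the same trick to keep a single trace_xdp_exception() call for all failure paths.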
@@ -1095,7 +1098,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
 }
 
 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
-				     u32 flow_idx, int cpu)
+				     u32 flow_idx, int cpu, int *xdp_state)
 {
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1114,6 +1117,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	void **swdata;
 	u32 *psdata;
 
+	*xdp_state = AM65_CPSW_XDP_PASS;
 	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
 	if (ret) {
 		if (ret != -ENODATA)
@@ -1161,15 +1165,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	}
 
 	if (port->xdp_prog) {
-		xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
-
-		xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
+		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq);
+		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
 				 pkt_len, false);
-
-		ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
-					cpu, &pkt_len);
-		if (ret != AM65_CPSW_XDP_PASS)
-			return ret;
+		*xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+					       cpu, &pkt_len);
+		if (*xdp_state != AM65_CPSW_XDP_PASS)
+			goto allocate;
 
 		/* Compute additional headroom to be reserved */
 		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
@@ -1193,9 +1195,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	stats->rx_bytes += pkt_len;
 	u64_stats_update_end(&stats->syncp);
 
+allocate:
 	new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
-	if (unlikely(!new_page))
+	if (unlikely(!new_page)) {
+		dev_err(dev, "page alloc failed\n");
 		return -ENOMEM;
+	}
+
 	rx_chn->pages[desc_idx] = new_page;
 
 	if (netif_dormant(ndev)) {
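Several details are worth calling out in these rx_packets() hunks. *xdp_state is initialized to AM65_CPSW_XDP_PASS before the first early return, so ring-status exits (-ENODATA and friends) leave a sane verdict behind. The buffer is now described to XDP as the full PAGE_SIZE with the driver's fixed AM65_CPSW_HEADROOM, rather than deriving both from the not-yet-built skb. And the new allocate: label lets any non-PASS verdict skip skb construction without skipping the ring refill, since the page XDP just consumed has already left the ring. The resulting control flow, abridged (skb build elided; comments are mine):

	*xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
				       cpu, &pkt_len);
	if (*xdp_state != AM65_CPSW_XDP_PASS)
		goto allocate;	/* no skb, but the ring slot still needs a page */

	/* ... skb setup and stats, unchanged ... */

allocate:
	new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);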
@@ -1229,29 +1235,29 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
 	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
 	int flow = AM65_CPSW_MAX_RX_FLOWS;
 	int cpu = smp_processor_id();
-	bool xdp_redirect = false;
+	int xdp_state_or = 0;
 	int cur_budget, ret;
+	int xdp_state;
 	int num_rx = 0;
 
 	/* process every flow */
 	while (flow--) {
 		cur_budget = budget - num_rx;
 
 		while (cur_budget--) {
-			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
-			if (ret) {
-				if (ret == AM65_CPSW_XDP_REDIRECT)
-					xdp_redirect = true;
+			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu,
+							&xdp_state);
+			xdp_state_or |= xdp_state;
+			if (ret)
 				break;
-			}
 			num_rx++;
 		}
 
 		if (num_rx >= budget)
 			break;
 	}
 
-	if (xdp_redirect)
+	if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
 		xdp_do_flush();
 
 	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
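With the verdicts OR-ed into xdp_state_or, a single xdp_do_flush() at the end of the poll drains whatever xdp_do_redirect() queued in its per-CPU bulk structures; the flush must happen before the NAPI poll completes, and testing the accumulated REDIRECT bit avoids issuing it when no packet took that path. The same two lines from the hunk above, annotated (comments are mine):

	if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)	/* any redirect this poll? */
		xdp_do_flush();				/* drain queued redirects  */

Note also that ret now only reports ring status, so an XDP verdict can no longer terminate the budget loop early the way the old combined return value could.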