
Commit bf8e3f1

rogerq authored and gregkh committed
net: ethernet: ti: am65-cpsw: fix XDP_DROP, XDP_TX and XDP_REDIRECT
commit 5e24db5 upstream.

The following XDP_DROP test from [1] stalls the interface after 250 packets:

  ~# xdp-bench drop -m native eth0

This is because new RX requests are never queued. Fix that.

The below XDP_TX test from [1] fails with a warning:

  [  499.947381] XDP_WARN: xdp_update_frame_from_buff(line:277): Driver BUG: missing reserved tailroom

  ~# xdp-bench tx -m native eth0

Fix that by using PAGE_SIZE during xdp_init_buff().

In the XDP_REDIRECT case only 1 packet was processed in rx_poll. Fix it to process up to budget packets.

Fix all XDP error cases to call trace_xdp_exception() and drop the packet in am65_cpsw_run_xdp().

[1] xdp-tools suite https://github.com/xdp-project/xdp-tools

Fixes: 8acacc4 ("net: ethernet: ti: am65-cpsw: Add minimal XDP support")
Signed-off-by: Roger Quadros <[email protected]>
Reviewed-by: Jacob Keller <[email protected]>
Acked-by: Julien Panis <[email protected]>
Reviewed-by: MD Danish Anwar <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent 3fe7ba1 commit bf8e3f1
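Editor's note on the mechanism: the patch turns AM65_CPSW_XDP_CONSUMED and AM65_CPSW_XDP_REDIRECT into distinct bit flags (BIT(1) and BIT(0)), reports the per-packet XDP outcome through an out-parameter, and has the NAPI poll loop OR the outcomes together so xdp_do_flush() runs once per poll rather than bailing out after the first redirected packet. The following is a minimal standalone userspace sketch of that aggregation pattern, not driver code; rx_one_packet and flush_redirects are stand-in names for illustration only.

/*
 * Standalone sketch (not kernel code) of the return-state pattern this
 * patch moves to: each per-packet call reports its XDP outcome as bit
 * flags, the poll loop ORs them together, and the redirect flush runs
 * once per poll instead of stopping the loop.
 */
#include <stdio.h>

#define XDP_STATE_PASS      0
#define XDP_STATE_REDIRECT  (1 << 0)   /* mirrors AM65_CPSW_XDP_REDIRECT */
#define XDP_STATE_CONSUMED  (1 << 1)   /* mirrors AM65_CPSW_XDP_CONSUMED */

/* Stub for one received packet: pretend every third packet is redirected. */
static int rx_one_packet(int i, int *xdp_state)
{
	*xdp_state = (i % 3 == 0) ? XDP_STATE_REDIRECT : XDP_STATE_PASS;
	return 0;                       /* 0 = descriptor popped OK, keep polling */
}

static void flush_redirects(void)
{
	puts("flushing queued redirects once per poll (xdp_do_flush() analogue)");
}

int main(void)
{
	int budget = 8, num_rx = 0;
	int xdp_state_or = 0;           /* accumulated outcome for the whole poll */
	int xdp_state;

	while (num_rx < budget) {
		if (rx_one_packet(num_rx, &xdp_state))
			break;
		xdp_state_or |= xdp_state;  /* remember any redirect seen so far */
		num_rx++;
	}

	if (xdp_state_or & XDP_STATE_REDIRECT)
		flush_redirects();

	printf("processed %d packets, state mask 0x%x\n", num_rx, xdp_state_or);
	return 0;
}

Using distinct bits rather than plain enum values is what allows the caller to accumulate the state with a single OR across up to budget packets and still tell afterwards whether any of them took the redirect path.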

1 file changed: +34 −28

drivers/net/ethernet/ti/am65-cpsw-nuss.c (34 additions, 28 deletions)

@@ -156,12 +156,13 @@
 #define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
 
 /* XDP */
-#define AM65_CPSW_XDP_CONSUMED 2
-#define AM65_CPSW_XDP_REDIRECT 1
+#define AM65_CPSW_XDP_CONSUMED BIT(1)
+#define AM65_CPSW_XDP_REDIRECT BIT(0)
 #define AM65_CPSW_XDP_PASS 0
 
 /* Include headroom compatible with both skb and xdpf */
-#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
 
 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				       const u8 *dev_addr)
@@ -933,7 +934,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 	host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
 	if (unlikely(!host_desc)) {
 		ndev->stats.tx_dropped++;
-		return -ENOMEM;
+		return AM65_CPSW_XDP_CONSUMED;	/* drop */
 	}
 
 	am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
@@ -942,7 +943,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
			       pkt_len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
 		ndev->stats.tx_dropped++;
-		ret = -ENOMEM;
+		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
 		goto pool_free;
 	}
 
@@ -977,6 +978,7 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
 		/* Inform BQL */
 		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
 		ndev->stats.tx_errors++;
+		ret = AM65_CPSW_XDP_CONSUMED;	/* drop */
 		goto dma_unmap;
 	}
 
@@ -1004,6 +1006,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 	struct bpf_prog *prog;
 	struct page *page;
 	u32 act;
+	int err;
 
 	prog = READ_ONCE(port->xdp_prog);
 	if (!prog)
@@ -1023,22 +1026,22 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 
 		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf))
-			break;
+			goto drop;
 
 		__netif_tx_lock(netif_txq, cpu);
-		ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+		err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
					     AM65_CPSW_TX_BUF_TYPE_XDP_TX);
 		__netif_tx_unlock(netif_txq);
-		if (ret)
-			break;
+		if (err)
+			goto drop;
 
 		ndev->stats.rx_bytes += *len;
 		ndev->stats.rx_packets++;
 		ret = AM65_CPSW_XDP_CONSUMED;
 		goto out;
 	case XDP_REDIRECT:
 		if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
-			break;
+			goto drop;
 
 		ndev->stats.rx_bytes += *len;
 		ndev->stats.rx_packets++;
@@ -1048,6 +1051,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 		bpf_warn_invalid_xdp_action(ndev, prog, act);
 		fallthrough;
 	case XDP_ABORTED:
+drop:
 		trace_xdp_exception(ndev, prog, act);
 		fallthrough;
 	case XDP_DROP:
@@ -1056,7 +1060,6 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
 
 	page = virt_to_head_page(xdp->data);
 	am65_cpsw_put_page(rx_chn, page, true, desc_idx);
-
 out:
 	return ret;
 }
@@ -1095,7 +1098,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
 }
 
 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
-				     u32 flow_idx, int cpu)
+				     u32 flow_idx, int cpu, int *xdp_state)
 {
 	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
 	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1114,6 +1117,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	void **swdata;
 	u32 *psdata;
 
+	*xdp_state = AM65_CPSW_XDP_PASS;
 	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
 	if (ret) {
 		if (ret != -ENODATA)
@@ -1161,15 +1165,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	}
 
 	if (port->xdp_prog) {
-		xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
-
-		xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
+		xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq);
+		xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
				 pkt_len, false);
-
-		ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
-					cpu, &pkt_len);
-		if (ret != AM65_CPSW_XDP_PASS)
-			return ret;
+		*xdp_state = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+					       cpu, &pkt_len);
+		if (*xdp_state != AM65_CPSW_XDP_PASS)
+			goto allocate;
 
 		/* Compute additional headroom to be reserved */
 		headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
@@ -1193,9 +1195,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
 	stats->rx_bytes += pkt_len;
 	u64_stats_update_end(&stats->syncp);
 
+allocate:
 	new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
-	if (unlikely(!new_page))
+	if (unlikely(!new_page)) {
+		dev_err(dev, "page alloc failed\n");
 		return -ENOMEM;
+	}
+
 	rx_chn->pages[desc_idx] = new_page;
 
 	if (netif_dormant(ndev)) {
@@ -1229,29 +1235,29 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
 	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
 	int flow = AM65_CPSW_MAX_RX_FLOWS;
 	int cpu = smp_processor_id();
-	bool xdp_redirect = false;
+	int xdp_state_or = 0;
 	int cur_budget, ret;
+	int xdp_state;
 	int num_rx = 0;
 
 	/* process every flow */
 	while (flow--) {
 		cur_budget = budget - num_rx;
 
 		while (cur_budget--) {
-			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
-			if (ret) {
-				if (ret == AM65_CPSW_XDP_REDIRECT)
-					xdp_redirect = true;
+			ret = am65_cpsw_nuss_rx_packets(common, flow, cpu,
							&xdp_state);
+			xdp_state_or |= xdp_state;
+			if (ret)
 				break;
-			}
 			num_rx++;
 		}
 
 		if (num_rx >= budget)
 			break;
 	}
 
-	if (xdp_redirect)
+	if (xdp_state_or & AM65_CPSW_XDP_REDIRECT)
 		xdp_do_flush();
 
 	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
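Editor's note on the PAGE_SIZE change in xdp_init_buff(): the core XDP code reserves room for struct skb_shared_info at the end of the frame (roughly data_hard_start + frame_sz - sizeof(struct skb_shared_info)), so declaring a frame size smaller than the real page-pool buffer can leave data_end past that boundary and trigger the "missing reserved tailroom" warning quoted in the commit message. The standalone sketch below only illustrates that arithmetic; SHARED_INFO_SZ, HEADROOM and the frame sizes are illustrative stand-ins, not the driver's actual values.

/*
 * Standalone sketch (not kernel code) of the buffer-layout constraint
 * behind the "missing reserved tailroom" warning: with a page-backed RX
 * buffer, the frame size passed to xdp_init_buff() should cover the whole
 * page so that frame_sz minus the shared-info area still leaves room for
 * the packet written after the headroom.
 */
#include <stdio.h>

#define PAGE_SIZE        4096
#define SHARED_INFO_SZ    320   /* stand-in for sizeof(struct skb_shared_info) */
#define HEADROOM          256   /* stand-in for AM65_CPSW_HEADROOM */

static int has_reserved_tailroom(unsigned int frame_sz, unsigned int pkt_len)
{
	unsigned int data_end      = HEADROOM + pkt_len;
	unsigned int data_hard_end = frame_sz - SHARED_INFO_SZ;

	/* data_end must not reach into the shared-info area reserved at
	 * the end of the frame. */
	return data_end <= data_hard_end;
}

int main(void)
{
	unsigned int pkt_len = 1500;

	/* An undersized frame_sz (e.g. only around the MTU) leaves no tailroom. */
	printf("frame_sz=1536:      tailroom %s\n",
	       has_reserved_tailroom(1536, pkt_len) ? "ok" : "missing");

	/* A full-page frame_sz keeps the shared-info space at the end. */
	printf("frame_sz=PAGE_SIZE: tailroom %s\n",
	       has_reserved_tailroom(PAGE_SIZE, pkt_len) ? "ok" : "missing");

	return 0;
}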
