Commit 4ff3203

Andre Guedes authored and anguy11 committed
igc: Add support for XDP_REDIRECT action
Add support for the XDP_REDIRECT action, which enables XDP programs to redirect packets arriving at the I225 NIC. It also implements the ndo_xdp_xmit ops, enabling the igc driver to transmit packets forwarded to it by XDP programs running on other interfaces.

The patch tweaks the driver's page counting and recycling scheme, as described in the following two commits and implemented by other Intel drivers, in order to properly support the XDP_REDIRECT action:

  commit 8ce29c6 ("i40e: tweak page counting for XDP_REDIRECT")
  commit 75aab4e ("i40e: avoid premature Rx buffer reuse")

This patch has been tested with the sample apps "xdp_redirect_cpu" and "xdp_redirect_map" located in samples/bpf/.

Signed-off-by: Andre Guedes <[email protected]>
Signed-off-by: Vedang Patel <[email protected]>
Signed-off-by: Jithu Joseph <[email protected]>
Reviewed-by: Maciej Fijalkowski <[email protected]>
Tested-by: Dvora Fuxbrumer <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
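For context, below is a minimal sketch (not part of this commit) of the kind of BPF program whose verdict exercises the new code path: any program returning XDP_REDIRECT, e.g. via bpf_redirect_map(), is now handled by igc_xdp_run_prog() as IGC_XDP_REDIRECT and flushed through xdp_do_flush() at the end of the NAPI poll. The map name xdp_tx_ports, its single-entry sizing, and the program name are illustrative only; the samples/bpf programs mentioned above differ in detail.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Illustrative devmap; key 0 holds the ifindex of the egress device. */
struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);
} xdp_tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_sample(struct xdp_md *ctx)
{
        /* bpf_redirect_map() returns XDP_REDIRECT when the lookup succeeds;
         * the driver then hands the frame to xdp_do_redirect() and flushes
         * via xdp_do_flush() when the Rx poll finishes.
         */
        return bpf_redirect_map(&xdp_tx_ports, 0, 0);
}

char _license[] SEC("license") = "GPL";

On the other side of a redirect, the new ndo_xdp_xmit hook (igc_xdp_xmit below) is what allows programs running on other interfaces to use the I225 as a redirect target.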
1 parent 73f1071 commit 4ff3203

File tree

1 file changed: +73 −11 lines changed

drivers/net/ethernet/intel/igc/igc_main.c

Lines changed: 73 additions & 11 deletions
@@ -26,6 +26,7 @@
 #define IGC_XDP_PASS 0
 #define IGC_XDP_CONSUMED BIT(0)
 #define IGC_XDP_TX BIT(1)
+#define IGC_XDP_REDIRECT BIT(2)
 
 static int debug = -1;
 
@@ -1505,11 +1506,18 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
 }
 
 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
-                                               const unsigned int size)
+                                               const unsigned int size,
+                                               int *rx_buffer_pgcnt)
 {
         struct igc_rx_buffer *rx_buffer;
 
         rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+        *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+                page_count(rx_buffer->page);
+#else
+                0;
+#endif
         prefetchw(rx_buffer->page);
 
         /* we are reusing so sync this buffer for CPU use */
@@ -1677,7 +1685,8 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
         new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }
 
-static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
+static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
+                                  int rx_buffer_pgcnt)
 {
         unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
         struct page *page = rx_buffer->page;
@@ -1688,7 +1697,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
         /* if we are only owner of page we can reuse it */
-        if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+        if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
                 return false;
 #else
 #define IGC_LAST_OFFSET \
@@ -1702,8 +1711,8 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
          * the pagecnt_bias and page count so that we fully restock the
          * number of references the driver holds.
          */
-        if (unlikely(!pagecnt_bias)) {
-                page_ref_add(page, USHRT_MAX);
+        if (unlikely(pagecnt_bias == 1)) {
+                page_ref_add(page, USHRT_MAX - 1);
                 rx_buffer->pagecnt_bias = USHRT_MAX;
         }
 
@@ -1776,9 +1785,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
 }
 
 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
-                              struct igc_rx_buffer *rx_buffer)
+                              struct igc_rx_buffer *rx_buffer,
+                              int rx_buffer_pgcnt)
 {
-        if (igc_can_reuse_rx_page(rx_buffer)) {
+        if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
                 /* hand second half of page back to the ring */
                 igc_reuse_rx_page(rx_ring, rx_buffer);
         } else {
@@ -1844,7 +1854,8 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
         bi->dma = dma;
         bi->page = page;
         bi->page_offset = igc_rx_offset(rx_ring);
-        bi->pagecnt_bias = 1;
+        page_ref_add(page, USHRT_MAX - 1);
+        bi->pagecnt_bias = USHRT_MAX;
 
         return true;
 }
@@ -2040,6 +2051,12 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
                 else
                         res = IGC_XDP_TX;
                 break;
+        case XDP_REDIRECT:
+                if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
+                        res = IGC_XDP_CONSUMED;
+                else
+                        res = IGC_XDP_REDIRECT;
+                break;
         default:
                 bpf_warn_invalid_xdp_action(act);
                 fallthrough;
@@ -2081,6 +2098,9 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
                 igc_flush_tx_descriptors(ring);
                 __netif_tx_unlock(nq);
         }
+
+        if (status & IGC_XDP_REDIRECT)
+                xdp_do_flush();
 }
 
 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
@@ -2090,7 +2110,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
         struct igc_ring *rx_ring = q_vector->rx.ring;
         struct sk_buff *skb = rx_ring->skb;
         u16 cleaned_count = igc_desc_unused(rx_ring);
-        int xdp_status = 0;
+        int xdp_status = 0, rx_buffer_pgcnt;
 
         while (likely(total_packets < budget)) {
                 union igc_adv_rx_desc *rx_desc;
@@ -2118,7 +2138,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                  */
                 dma_rmb();
 
-                rx_buffer = igc_get_rx_buffer(rx_ring, size);
+                rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
                 truesize = igc_get_rx_frame_truesize(rx_ring, size);
 
                 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -2149,6 +2169,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                         rx_buffer->pagecnt_bias++;
                         break;
                 case IGC_XDP_TX:
+                case IGC_XDP_REDIRECT:
                         igc_rx_buffer_flip(rx_buffer, truesize);
                         xdp_status |= xdp_res;
                         break;
@@ -2171,7 +2192,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                         break;
                 }
 
-                igc_put_rx_buffer(rx_ring, rx_buffer);
+                igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
                 cleaned_count++;
 
                 /* fetch next buffer in frame if non-eop */
@@ -5111,6 +5132,46 @@ static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
         }
 }
 
+static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+                        struct xdp_frame **frames, u32 flags)
+{
+        struct igc_adapter *adapter = netdev_priv(dev);
+        int cpu = smp_processor_id();
+        struct netdev_queue *nq;
+        struct igc_ring *ring;
+        int i, drops;
+
+        if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+                return -ENETDOWN;
+
+        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+                return -EINVAL;
+
+        ring = igc_xdp_get_tx_ring(adapter, cpu);
+        nq = txring_txq(ring);
+
+        __netif_tx_lock(nq, cpu);
+
+        drops = 0;
+        for (i = 0; i < num_frames; i++) {
+                int err;
+                struct xdp_frame *xdpf = frames[i];
+
+                err = igc_xdp_init_tx_descriptor(ring, xdpf);
+                if (err) {
+                        xdp_return_frame_rx_napi(xdpf);
+                        drops++;
+                }
+        }
+
+        if (flags & XDP_XMIT_FLUSH)
+                igc_flush_tx_descriptors(ring);
+
+        __netif_tx_unlock(nq);
+
+        return num_frames - drops;
+}
+
 static const struct net_device_ops igc_netdev_ops = {
         .ndo_open = igc_open,
         .ndo_stop = igc_close,
@@ -5125,6 +5186,7 @@ static const struct net_device_ops igc_netdev_ops = {
         .ndo_do_ioctl = igc_ioctl,
         .ndo_setup_tc = igc_setup_tc,
         .ndo_bpf = igc_bpf,
+        .ndo_xdp_xmit = igc_xdp_xmit,
 };
 
 /* PCIe configuration access */
