 #define IGC_XDP_PASS      0
 #define IGC_XDP_CONSUMED  BIT(0)
 #define IGC_XDP_TX        BIT(1)
+#define IGC_XDP_REDIRECT  BIT(2)

 static int debug = -1;

@@ -1505,11 +1506,18 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
 }

 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
-                                               const unsigned int size)
+                                               const unsigned int size,
+                                               int *rx_buffer_pgcnt)
 {
         struct igc_rx_buffer *rx_buffer;

         rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+        *rx_buffer_pgcnt =
+#if (PAGE_SIZE < 8192)
+                page_count(rx_buffer->page);
+#else
+                0;
+#endif
         prefetchw(rx_buffer->page);

         /* we are reusing so sync this buffer for CPU use */
@@ -1677,7 +1685,8 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
         new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }

-static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
+static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
+                                  int rx_buffer_pgcnt)
 {
         unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
         struct page *page = rx_buffer->page;
@@ -1688,7 +1697,7 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)

 #if (PAGE_SIZE < 8192)
         /* if we are only owner of page we can reuse it */
-        if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+        if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
                 return false;
 #else
 #define IGC_LAST_OFFSET \
@@ -1702,8 +1711,8 @@ static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
          * the pagecnt_bias and page count so that we fully restock the
          * number of references the driver holds.
          */
-        if (unlikely(!pagecnt_bias)) {
-                page_ref_add(page, USHRT_MAX);
+        if (unlikely(pagecnt_bias == 1)) {
+                page_ref_add(page, USHRT_MAX - 1);
                 rx_buffer->pagecnt_bias = USHRT_MAX;
         }

@@ -1776,9 +1785,10 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
 }

 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
-                              struct igc_rx_buffer *rx_buffer)
+                              struct igc_rx_buffer *rx_buffer,
+                              int rx_buffer_pgcnt)
 {
-        if (igc_can_reuse_rx_page(rx_buffer)) {
+        if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
                 /* hand second half of page back to the ring */
                 igc_reuse_rx_page(rx_ring, rx_buffer);
         } else {
@@ -1844,7 +1854,8 @@ static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
         bi->dma = dma;
         bi->page = page;
         bi->page_offset = igc_rx_offset(rx_ring);
-        bi->pagecnt_bias = 1;
+        page_ref_add(page, USHRT_MAX - 1);
+        bi->pagecnt_bias = USHRT_MAX;

         return true;
 }
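
The refcount handling in the hunks above changes in two places that belong together: igc_alloc_mapped_page() now charges USHRT_MAX - 1 extra references up front, and igc_can_reuse_rx_page() compares a page count cached before the XDP program ran (rx_buffer_pgcnt) against pagecnt_bias. The effect is that the RX hot path hands out pre-charged references by decrementing a local counter instead of touching the atomic page refcount per packet. A minimal userspace model of that arithmetic, purely illustrative (no real struct page; page_refs stands in for page_ref_count()):

#include <limits.h>
#include <stdio.h>

struct model_rx_buffer {
        int page_refs;                 /* stands in for page_ref_count(page) */
        unsigned short pagecnt_bias;   /* references still owned by the driver */
};

static void model_alloc(struct model_rx_buffer *b)
{
        b->page_refs = 1;              /* the reference alloc_page() returns   */
        b->page_refs += USHRT_MAX - 1; /* page_ref_add(page, USHRT_MAX - 1)    */
        b->pagecnt_bias = USHRT_MAX;
}

static int model_can_reuse(const struct model_rx_buffer *b)
{
        /* Mirrors the PAGE_SIZE < 8192 reuse test: at most one reference
         * may be outstanding beyond the driver's own bias pool.
         */
        return (b->page_refs - b->pagecnt_bias) <= 1;
}

int main(void)
{
        struct model_rx_buffer b;

        model_alloc(&b);

        b.pagecnt_bias--;   /* first half of the page handed to the stack */
        printf("one half in flight,   reusable: %d\n", model_can_reuse(&b));

        b.pagecnt_bias--;   /* second half handed out before the first came back */
        printf("both halves in flight, reusable: %d\n", model_can_reuse(&b));

        return 0;
}

Run as-is it prints 1 then 0: the page is recycled only while at most one half is outstanding, which is the same decision the cheaper cached-count comparison makes in the driver.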
@@ -2040,6 +2051,12 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
                 else
                         res = IGC_XDP_TX;
                 break;
+        case XDP_REDIRECT:
+                if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
+                        res = IGC_XDP_CONSUMED;
+                else
+                        res = IGC_XDP_REDIRECT;
+                break;
         default:
                 bpf_warn_invalid_xdp_action(act);
                 fallthrough;
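
What produces the XDP_REDIRECT verdict handled by the new case is a BPF program attached to the interface, typically redirecting through a devmap. A minimal libbpf-style sketch, not part of this patch (the map name tx_port and the program name redirect_all are made up):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Devmap with a single slot; userspace fills it with the target ifindex. */
struct {
        __uint(type, BPF_MAP_TYPE_DEVMAP);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u32);
} tx_port SEC(".maps");

SEC("xdp")
int redirect_all(struct xdp_md *ctx)
{
        __u32 key = 0;

        /* Returns XDP_REDIRECT when slot 0 holds a valid device, which is
         * the verdict the new case in igc_xdp_run_prog() acts on; with
         * flags == 0 a failed lookup yields XDP_ABORTED instead.
         */
        return bpf_redirect_map(&tx_port, key, 0);
}

char _license[] SEC("license") = "GPL";

On the driver side, xdp_do_redirect() resolves the chosen map entry, and frames redirected to another netdevice are ultimately pushed out through that device's ndo_xdp_xmit(), the hook this patch adds for igc further down.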
@@ -2081,6 +2098,9 @@ static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
                 igc_flush_tx_descriptors(ring);
                 __netif_tx_unlock(nq);
         }
+
+        if (status & IGC_XDP_REDIRECT)
+                xdp_do_flush();
 }

 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
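
xdp_do_redirect() only queues the frame on a per-CPU bulk queue; delivery happens when xdp_do_flush() runs. That is why igc_finalize_xdp() ORs the per-frame verdicts into one status word and flushes once per NAPI poll rather than per packet. A small userspace model of the accumulate-then-flush pattern, with hypothetical helper names and simplified verdict values:

#include <stdio.h>

#define MODEL_XDP_TX       (1 << 1)   /* plays the role of IGC_XDP_TX       */
#define MODEL_XDP_REDIRECT (1 << 2)   /* plays the role of IGC_XDP_REDIRECT */

static void model_finalize(int status)
{
        if (status & MODEL_XDP_TX)
                printf("flush local TX descriptors once\n");
        if (status & MODEL_XDP_REDIRECT)
                printf("flush redirect bulk queues once\n");
}

int main(void)
{
        int verdicts[] = { MODEL_XDP_TX, MODEL_XDP_REDIRECT, MODEL_XDP_TX };
        int status = 0;
        unsigned long i;

        /* Per-frame work only sets bits, like xdp_status |= xdp_res. */
        for (i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
                status |= verdicts[i];

        /* The expensive work runs once, after the poll loop. */
        model_finalize(status);
        return 0;
}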
@@ -2090,7 +2110,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
         struct igc_ring *rx_ring = q_vector->rx.ring;
         struct sk_buff *skb = rx_ring->skb;
         u16 cleaned_count = igc_desc_unused(rx_ring);
-        int xdp_status = 0;
+        int xdp_status = 0, rx_buffer_pgcnt;

         while (likely(total_packets < budget)) {
                 union igc_adv_rx_desc *rx_desc;
@@ -2118,7 +2138,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                  */
                 dma_rmb();

-                rx_buffer = igc_get_rx_buffer(rx_ring, size);
+                rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
                 truesize = igc_get_rx_frame_truesize(rx_ring, size);

                 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -2149,6 +2169,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                                 rx_buffer->pagecnt_bias++;
                                 break;
                         case IGC_XDP_TX:
+                        case IGC_XDP_REDIRECT:
                                 igc_rx_buffer_flip(rx_buffer, truesize);
                                 xdp_status |= xdp_res;
                                 break;
@@ -2171,7 +2192,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                         break;
                 }

-                igc_put_rx_buffer(rx_ring, rx_buffer);
+                igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
                 cleaned_count++;

                 /* fetch next buffer in frame if non-eop */
@@ -5111,6 +5132,46 @@ static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
         }
 }

+static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+                        struct xdp_frame **frames, u32 flags)
+{
+        struct igc_adapter *adapter = netdev_priv(dev);
+        int cpu = smp_processor_id();
+        struct netdev_queue *nq;
+        struct igc_ring *ring;
+        int i, drops;
+
+        if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+                return -ENETDOWN;
+
+        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+                return -EINVAL;
+
+        ring = igc_xdp_get_tx_ring(adapter, cpu);
+        nq = txring_txq(ring);
+
+        __netif_tx_lock(nq, cpu);
+
+        drops = 0;
+        for (i = 0; i < num_frames; i++) {
+                int err;
+                struct xdp_frame *xdpf = frames[i];
+
+                err = igc_xdp_init_tx_descriptor(ring, xdpf);
+                if (err) {
+                        xdp_return_frame_rx_napi(xdpf);
+                        drops++;
+                }
+        }
+
+        if (flags & XDP_XMIT_FLUSH)
+                igc_flush_tx_descriptors(ring);
+
+        __netif_tx_unlock(nq);
+
+        return num_frames - drops;
+}
+
 static const struct net_device_ops igc_netdev_ops = {
         .ndo_open               = igc_open,
         .ndo_stop               = igc_close,
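
One detail of igc_xdp_xmit() worth calling out is its return contract: frames the ring cannot take are freed with xdp_return_frame_rx_napi() and only the accepted count, num_frames - drops, is reported back to the redirect infrastructure. A userspace model of just that bookkeeping, with made-up helper names and no real rings, locking, or xdp_frame:

#include <stdio.h>

#define MODEL_RING_SPACE 2   /* pretend the TX ring only has room for two frames */

static int model_ring_used;

/* Stands in for igc_xdp_init_tx_descriptor(): 0 on success, nonzero when
 * the ring has no free descriptors.
 */
static int model_init_tx_descriptor(int frame)
{
        (void)frame;
        if (model_ring_used >= MODEL_RING_SPACE)
                return -1;
        model_ring_used++;
        return 0;
}

static int model_xdp_xmit(const int *frames, int num_frames)
{
        int i, drops = 0;

        for (i = 0; i < num_frames; i++) {
                if (model_init_tx_descriptor(frames[i]))
                        drops++;   /* the real driver also frees the frame here */
        }

        return num_frames - drops;
}

int main(void)
{
        int frames[] = { 10, 20, 30 };

        printf("sent %d of 3 frames\n", model_xdp_xmit(frames, 3));  /* sent 2 of 3 */
        return 0;
}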
@@ -5125,6 +5186,7 @@ static const struct net_device_ops igc_netdev_ops = {
         .ndo_do_ioctl           = igc_ioctl,
         .ndo_setup_tc           = igc_setup_tc,
         .ndo_bpf                = igc_bpf,
+        .ndo_xdp_xmit           = igc_xdp_xmit,
 };

 /* PCIe configuration access */