 #define IGC_XDP_PASS		0
 #define IGC_XDP_CONSUMED	BIT(0)
+#define IGC_XDP_TX		BIT(1)
 
 static int debug = -1;
@@ -181,8 +182,10 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 	while (i != tx_ring->next_to_use) {
 		union igc_adv_tx_desc *eop_desc, *tx_desc;
 
-		/* Free all the Tx ring sk_buffs */
-		dev_kfree_skb_any(tx_buffer->skb);
+		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+			xdp_return_frame(tx_buffer->xdpf);
+		else
+			dev_kfree_skb_any(tx_buffer->skb);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,
@@ -410,6 +413,8 @@ void igc_free_rx_resources(struct igc_ring *rx_ring)
 {
 	igc_clean_rx_ring(rx_ring);
 
+	igc_xdp_unregister_rxq_info(rx_ring);
+
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
@@ -447,7 +452,11 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
 {
 	struct net_device *ndev = rx_ring->netdev;
 	struct device *dev = rx_ring->dev;
-	int size, desc_len;
+	int size, desc_len, res;
+
+	res = igc_xdp_register_rxq_info(rx_ring);
+	if (res < 0)
+		return res;
 
 	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
 	rx_ring->rx_buffer_info = vzalloc(size);
@@ -473,6 +482,7 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
 	return 0;
 
 err:
+	igc_xdp_unregister_rxq_info(rx_ring);
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
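Note: the igc_xdp_register_rxq_info()/igc_xdp_unregister_rxq_info() helpers called in the hunks above are defined in igc_xdp.c and are not shown in this diff. As a rough, non-authoritative sketch of what such a registration helper typically does with the core xdp_rxq_info API (the actual igc implementation, and the xdp_rxq_info_reg() signature, which has changed across kernel versions, may differ):

/* Illustrative only -- not the actual igc_xdp.c implementation. */
static int example_xdp_register_rxq_info(struct igc_ring *ring)
{
	int err;

	/* Make the RX queue known to the XDP core so xdp_buff.rxq is valid */
	err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
			       ring->queue_index, 0);
	if (err < 0)
		return err;

	/* Declare how RX buffers are recycled (driver-recycled shared pages) */
	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
					 MEM_TYPE_PAGE_SHARED, NULL);
	if (err) {
		xdp_rxq_info_unreg(&ring->xdp_rxq);
		return err;
	}

	return 0;
}

The unregister side would correspondingly call xdp_rxq_info_unreg(), which also releases the registered memory model.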
@@ -1909,6 +1919,101 @@ static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
 	}
 }
 
+static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
+				  struct xdp_frame *xdpf,
+				  struct igc_ring *ring)
+{
+	dma_addr_t dma;
+
+	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ring->dev, dma)) {
+		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+		return -ENOMEM;
+	}
+
+	buffer->xdpf = xdpf;
+	buffer->tx_flags = IGC_TX_FLAGS_XDP;
+	buffer->protocol = 0;
+	buffer->bytecount = xdpf->len;
+	buffer->gso_segs = 1;
+	buffer->time_stamp = jiffies;
+	dma_unmap_len_set(buffer, len, xdpf->len);
+	dma_unmap_addr_set(buffer, dma, dma);
+	return 0;
+}
+
+/* This function requires __netif_tx_lock is held by the caller. */
+static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
+				      struct xdp_frame *xdpf)
+{
+	struct igc_tx_buffer *buffer;
+	union igc_adv_tx_desc *desc;
+	u32 cmd_type, olinfo_status;
+	int err;
+
+	if (!igc_desc_unused(ring))
+		return -EBUSY;
+
+	buffer = &ring->tx_buffer_info[ring->next_to_use];
+	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
+	if (err)
+		return err;
+
+	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+		   buffer->bytecount;
+	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+	desc = IGC_TX_DESC(ring, ring->next_to_use);
+	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+
+	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+
+	buffer->next_to_watch = desc;
+
+	ring->next_to_use++;
+	if (ring->next_to_use == ring->count)
+		ring->next_to_use = 0;
+
+	return 0;
+}
+
+static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
+					    int cpu)
+{
+	int index = cpu;
+
+	if (unlikely(index < 0))
+		index = 0;
+
+	while (index >= adapter->num_tx_queues)
+		index -= adapter->num_tx_queues;
+
+	return adapter->tx_ring[index];
+}
+
+static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
+{
+	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	struct igc_ring *ring;
+	int res;
+
+	if (unlikely(!xdpf))
+		return -EFAULT;
+
+	ring = igc_xdp_get_tx_ring(adapter, cpu);
+	nq = txring_txq(ring);
+
+	__netif_tx_lock(nq, cpu);
+	res = igc_xdp_init_tx_descriptor(ring, xdpf);
+	__netif_tx_unlock(nq);
+	return res;
+}
+
 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
 					struct xdp_buff *xdp)
 {
@@ -1929,6 +2034,12 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
 	case XDP_PASS:
 		res = IGC_XDP_PASS;
 		break;
+	case XDP_TX:
+		if (igc_xdp_xmit_back(adapter, xdp) < 0)
+			res = IGC_XDP_CONSUMED;
+		else
+			res = IGC_XDP_TX;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;
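The new XDP_TX case above means a BPF program attached to the igc interface can now bounce frames back out of the same port. As a minimal usage sketch (standard libbpf/XDP API, not part of this patch), a program exercising this path could be:

/* Illustrative only: reflect every received frame back out the same port. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_bounce(struct xdp_md *ctx)
{
	return XDP_TX;	/* serviced by igc_xdp_xmit_back() in this patch */
}

char _license[] SEC("license") = "GPL";

Once compiled, such a program can be attached with the usual iproute2 command, e.g. "ip link set dev <iface> xdp obj xdp_bounce.o sec xdp".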
@@ -1945,20 +2056,49 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
 	return ERR_PTR(-res);
 }
 
+/* This function assumes __netif_tx_lock is held by the caller. */
+static void igc_flush_tx_descriptors(struct igc_ring *ring)
+{
+	/* Once tail pointer is updated, hardware can fetch the descriptors
+	 * any time so we issue a write membar here to ensure all memory
+	 * writes are complete before the tail pointer is updated.
+	 */
+	wmb();
+	writel(ring->next_to_use, ring->tail);
+}
+
+static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
+{
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	struct igc_ring *ring;
+
+	if (status & IGC_XDP_TX) {
+		ring = igc_xdp_get_tx_ring(adapter, cpu);
+		nq = txring_txq(ring);
+
+		__netif_tx_lock(nq, cpu);
+		igc_flush_tx_descriptors(ring);
+		__netif_tx_unlock(nq);
+	}
+}
+
 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 {
 	unsigned int total_bytes = 0, total_packets = 0;
+	struct igc_adapter *adapter = q_vector->adapter;
 	struct igc_ring *rx_ring = q_vector->rx.ring;
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = igc_desc_unused(rx_ring);
+	int xdp_status = 0;
 
 	while (likely(total_packets < budget)) {
 		union igc_adv_rx_desc *rx_desc;
 		struct igc_rx_buffer *rx_buffer;
+		unsigned int size, truesize;
 		ktime_t timestamp = 0;
 		struct xdp_buff xdp;
 		int pkt_offset = 0;
-		unsigned int size;
 		void *pktbuf;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -1979,6 +2119,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		dma_rmb();
 
 		rx_buffer = igc_get_rx_buffer(rx_ring, size);
+		truesize = igc_get_rx_frame_truesize(rx_ring, size);
 
 		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1990,19 +2131,29 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		}
 
 		if (!skb) {
-			struct igc_adapter *adapter = q_vector->adapter;
-
 			xdp.data = pktbuf + pkt_offset;
 			xdp.data_end = xdp.data + size;
 			xdp.data_hard_start = pktbuf - igc_rx_offset(rx_ring);
 			xdp_set_data_meta_invalid(&xdp);
-			xdp.frame_sz = igc_get_rx_frame_truesize(rx_ring, size);
+			xdp.frame_sz = truesize;
+			xdp.rxq = &rx_ring->xdp_rxq;
 
 			skb = igc_xdp_run_prog(adapter, &xdp);
 		}
 
 		if (IS_ERR(skb)) {
-			rx_buffer->pagecnt_bias++;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			switch (xdp_res) {
+			case IGC_XDP_CONSUMED:
+				rx_buffer->pagecnt_bias++;
+				break;
+			case IGC_XDP_TX:
+				igc_rx_buffer_flip(rx_buffer, truesize);
+				xdp_status |= xdp_res;
+				break;
+			}
+
 			total_packets++;
 			total_bytes += size;
 		} else if (skb)
@@ -2048,6 +2199,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 		total_packets++;
 	}
 
+	if (xdp_status)
+		igc_finalize_xdp(adapter, xdp_status);
+
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
@@ -2109,8 +2263,10 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
 
-		/* free the skb */
-		napi_consume_skb(tx_buffer->skb, napi_budget);
+		if (tx_buffer->tx_flags & IGC_TX_FLAGS_XDP)
+			xdp_return_frame(tx_buffer->xdpf);
+		else
+			napi_consume_skb(tx_buffer->skb, napi_budget);
 
 		/* unmap skb header data */
 		dma_unmap_single(tx_ring->dev,