Skip to content

Commit f126779

Browse files
Huy Nguyen authored and Saeed Mahameed committed
net/mlx5: Fix checksum issue of VXLAN and IPsec crypto offload
The packet is a VXLAN packet over an IPsec transport mode tunnel, which has the following format: [IP1 | ESP | UDP | VXLAN | IP2 | TCP]. The NVIDIA ConnectX card cannot do checksum offload for two L4 headers. The solution is to use checksum partial offload, similar to the VXLAN | TCP packet case. Hardware calculates the IP1, IP2 and TCP checksums and software calculates the UDP checksum. However, unlike the VXLAN | TCP case, IPsec's mlx5 driver cannot access the inner plaintext IP protocol type. Therefore, inner_ipproto is added to the sec_path structure to provide this information. Also, utilize the skb's csum_start to program the inner L4 checksum offset. While at it, remove the call to mlx5e_set_eseg_swp and set up the software parser fields directly in mlx5e_ipsec_set_swp. mlx5e_set_eseg_swp is not needed, as the two features (GENEVE and IPsec) are different, and adding this sharing layer creates unnecessary complexity and affects performance. For the case of a VXLAN packet over an IPsec tunnel mode tunnel, checksum offload is disabled because the hardware does not support checksum offload for three L3 (IP) headers. Signed-off-by: Raed Salem <[email protected]> Signed-off-by: Huy Nguyen <[email protected]> Cc: Steffen Klassert <[email protected]> Signed-off-by: Saeed Mahameed <[email protected]>
1 parent fa45352 commit f126779

File tree

2 files changed

+70
-19
lines changed

2 files changed

+70
-19
lines changed

drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c

Lines changed: 49 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -136,8 +136,6 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
136136
struct mlx5_wqe_eth_seg *eseg, u8 mode,
137137
struct xfrm_offload *xo)
138138
{
139-
struct mlx5e_swp_spec swp_spec = {};
140-
141139
/* Tunnel Mode:
142140
* SWP: OutL3 InL3 InL4
143141
* Pkt: MAC IP ESP IP L4
@@ -146,23 +144,58 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
146144
* SWP: OutL3 InL4
147145
* InL3
148146
* Pkt: MAC IP ESP L4
147+
*
148+
* Tunnel(VXLAN TCP/UDP) over Transport Mode
149+
* SWP: OutL3 InL3 InL4
150+
* Pkt: MAC IP ESP UDP VXLAN IP L4
149151
*/
150-
swp_spec.l3_proto = skb->protocol;
151-
swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
152-
if (swp_spec.is_tun) {
153-
if (xo->proto == IPPROTO_IPV6) {
154-
swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
155-
swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
156-
} else {
157-
swp_spec.tun_l3_proto = htons(ETH_P_IP);
158-
swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
159-
}
160-
} else {
161-
swp_spec.tun_l3_proto = skb->protocol;
162-
swp_spec.tun_l4_proto = xo->proto;
152+
153+
/* Shared settings */
154+
eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
155+
if (skb->protocol == htons(ETH_P_IPV6))
156+
eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
157+
158+
/* Tunnel mode */
159+
if (mode == XFRM_MODE_TUNNEL) {
160+
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
161+
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
162+
if (xo->proto == IPPROTO_IPV6)
163+
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
164+
if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
165+
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
166+
return;
167+
}
168+
169+
/* Transport mode */
170+
if (mode != XFRM_MODE_TRANSPORT)
171+
return;
172+
173+
if (!xo->inner_ipproto) {
174+
eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
175+
eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
176+
if (skb->protocol == htons(ETH_P_IPV6))
177+
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
178+
if (xo->proto == IPPROTO_UDP)
179+
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
180+
return;
181+
}
182+
183+
/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
184+
switch (xo->inner_ipproto) {
185+
case IPPROTO_UDP:
186+
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
187+
fallthrough;
188+
case IPPROTO_TCP:
189+
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
190+
eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
191+
if (skb->protocol == htons(ETH_P_IPV6))
192+
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
193+
break;
194+
default:
195+
break;
163196
}
164197

165-
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
198+
return;
166199
}
167200

168201
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,

drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h

Lines changed: 21 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -96,16 +96,34 @@ void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
9696
static inline netdev_features_t
9797
mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
9898
{
99+
struct xfrm_offload *xo = xfrm_offload(skb);
99100
struct sec_path *sp = skb_sec_path(skb);
100101

101-
if (sp && sp->len) {
102+
if (sp && sp->len && xo) {
102103
struct xfrm_state *x = sp->xvec[0];
103104

104-
if (x && x->xso.offload_handle)
105-
return features;
105+
if (!x || !x->xso.offload_handle)
106+
goto out_disable;
107+
108+
if (xo->inner_ipproto) {
109+
/* Cannot support tunnel packet over IPsec tunnel mode
110+
* because we cannot offload three IP header csum
111+
*/
112+
if (x->props.mode == XFRM_MODE_TUNNEL)
113+
goto out_disable;
114+
115+
/* Only support UDP or TCP L4 checksum */
116+
if (xo->inner_ipproto != IPPROTO_UDP &&
117+
xo->inner_ipproto != IPPROTO_TCP)
118+
goto out_disable;
119+
}
120+
121+
return features;
122+
106123
}
107124

108125
/* Disable CSUM and GSO for software IPsec */
126+
out_disable:
109127
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
110128
}
111129

0 commit comments

Comments (0)