1 // SPDX-License-Identifier: GPL-2.0-or-later
#include <kunit/test.h>

#include <linux/skbuff.h>
#include <linux/string.h>
/* Fake link-layer header copied in front of every test skb's payload;
 * sizeof(hdr) (9 bytes, incl. the NUL) is the header length used throughout.
 */
9 static const char hdr[] = "abcdefgh";
/* gso_size (MSS) used by most test cases below. */
10 #define GSO_TEST_SIZE 1000
/* Common setup for every test skb: place the fake mac header at the start
 * of the buffer, advance skb->data past it, and mark the skb as GSO with
 * the default test MSS.
 * NOTE(review): this excerpt is missing lines (braces, and presumably the
 * gso_segs/type setup); only comments are added here.
 */
12 static void __init_skb(struct sk_buff *skb)
14 skb_reset_mac_header(skb);
15 memcpy(skb_mac_header(skb), hdr, sizeof(hdr));
17 /* skb_segment expects skb->data at start of payload */
18 skb_pull(skb, sizeof(hdr));
19 skb_reset_network_header(skb);
20 skb_reset_transport_header(skb);
22 /* proto is arbitrary, as long as not ETH_P_TEB or vlan */
23 skb->protocol = htons(ETH_P_ATALK);
24 skb_shinfo(skb)->gso_size = GSO_TEST_SIZE;
/* Tail of the test-case id enum; the enum header and earlier enumerators
 * (NO_GSO, LINEAR, FRAGS variants, GSO_PARTIAL, FRAG_LIST — all referenced
 * by the cases[] table below) are not visible in this excerpt.
 */
34 GSO_TEST_FRAG_LIST_PURE,
35 GSO_TEST_FRAG_LIST_NON_UNIFORM,
36 GSO_TEST_GSO_BY_FRAGS,
/* One GSO test case: how to build the input skb, and the segment lengths
 * skb_segment() is expected to produce.
 * NOTE(review): excerpt is missing some members (at least .id, .name and
 * a segment count — gso_test_func compares loop index i against
 * tcase->nr_segs) and the closing brace.
 */
39 struct gso_test_case {
/* input skb layout */
44 unsigned int linear_len;
45 unsigned int nr_frags;
46 const unsigned int *frags;
47 unsigned int nr_frag_skbs;
48 const unsigned int *frag_skbs;
50 /* output as expected */
52 const unsigned int *segs;
/* Test matrix: one entry per gso_test_id. Payload is built from
 * linear_len bytes of linear data, then optional page frags (.frags),
 * then optional frag_list child skbs (.frag_skbs); .segs lists the
 * expected payload length of each skb_segment() output segment.
 * NOTE(review): excerpt drops structural lines — entry braces, most
 * .name/.nr_frags/.nr_frag_skbs/.nr_segs initializers.
 */
55 static struct gso_test_case cases[] = {
57 .id = GSO_TEST_NO_GSO,
59 .linear_len = GSO_TEST_SIZE,
61 .segs = (const unsigned int[]) { GSO_TEST_SIZE },
64 .id = GSO_TEST_LINEAR,
66 .linear_len = GSO_TEST_SIZE + GSO_TEST_SIZE + 1,
68 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 1 },
73 .linear_len = GSO_TEST_SIZE,
75 .frags = (const unsigned int[]) { GSO_TEST_SIZE, 1 },
77 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 1 },
80 .id = GSO_TEST_FRAGS_PURE,
83 .frags = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 2 },
85 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 2 },
88 .id = GSO_TEST_GSO_PARTIAL,
89 .name = "gso_partial",
90 .linear_len = GSO_TEST_SIZE,
92 .frags = (const unsigned int[]) { GSO_TEST_SIZE, 3 },
/* GSO_PARTIAL keeps a multi-MSS first segment; only the tail is split. */
94 .segs = (const unsigned int[]) { 2 * GSO_TEST_SIZE, 3 },
97 /* commit 89319d3801d1: frag_list on mss boundaries */
98 .id = GSO_TEST_FRAG_LIST,
100 .linear_len = GSO_TEST_SIZE,
102 .frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
104 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, GSO_TEST_SIZE },
107 .id = GSO_TEST_FRAG_LIST_PURE,
108 .name = "frag_list_pure",
110 .frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
112 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
115 /* commit 43170c4e0ba7: GRO of frag_list trains */
116 .id = GSO_TEST_FRAG_LIST_NON_UNIFORM,
117 .name = "frag_list_non_uniform",
118 .linear_len = GSO_TEST_SIZE,
120 .frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, 1, GSO_TEST_SIZE, 2 },
122 .segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, GSO_TEST_SIZE, 3 },
125 /* commit 3953c46c3ac7 ("sk_buff: allow segmenting based on frag sizes") and
126 * commit 90017accff61 ("sctp: Add GSO support")
128 * "there will be a cover skb with protocol headers and
129 * children ones containing the actual segments"
131 .id = GSO_TEST_GSO_BY_FRAGS,
132 .name = "gso_by_frags",
134 .frag_skbs = (const unsigned int[]) { 100, 200, 300, 400 },
/* GSO_BY_FRAGS: each frag_list child becomes its own segment, unchanged. */
136 .segs = (const unsigned int[]) { 100, 200, 300, 400 },
140 static void gso_test_case_to_desc(struct gso_test_case *t, char *desc)
142 sprintf(desc, "%s", t->name);
145 KUNIT_ARRAY_PARAM(gso_test, cases, gso_test_case_to_desc);
/* Build the input skb described by the current test case (linear data,
 * optional page frags, optional frag_list children), run skb_segment(),
 * then verify each output segment's length, header placement and copied
 * mac header against tcase->segs.
 * NOTE(review): excerpt is missing lines (braces, declarations of 'page'
 * and 'i', error handling around skb_segment, the per-segment free/next
 * bookkeeping); comments only, code untouched.
 */
147 static void gso_test_func(struct kunit *test)
149 const int shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
150 struct sk_buff *skb, *segs, *cur, *next, *last;
151 const struct gso_test_case *tcase;
152 netdev_features_t features;
156 tcase = test->param_value;
/* Head skb: one page holding fake mac header + linear payload + shinfo. */
158 page = alloc_page(GFP_KERNEL);
159 KUNIT_ASSERT_NOT_NULL(test, page);
160 skb = build_skb(page_address(page), sizeof(hdr) + tcase->linear_len + shinfo_size);
161 KUNIT_ASSERT_NOT_NULL(test, skb);
162 __skb_put(skb, sizeof(hdr) + tcase->linear_len);
/* Optional paged fragments, all carved from a single extra page. */
166 if (tcase->nr_frags) {
167 unsigned int pg_off = 0;
169 page = alloc_page(GFP_KERNEL);
170 KUNIT_ASSERT_NOT_NULL(test, page);
/* One page ref per fragment so each fragment release drops its own. */
171 page_ref_add(page, tcase->nr_frags - 1);
173 for (i = 0; i < tcase->nr_frags; i++) {
174 skb_fill_page_desc(skb, i, page, pg_off, tcase->frags[i]);
175 pg_off += tcase->frags[i];
/* All fragments must fit within the single backing page. */
178 KUNIT_ASSERT_LE(test, pg_off, PAGE_SIZE);
/* Account the frag bytes in the head skb's length/truesize. */
180 skb->data_len = pg_off;
181 skb->len += skb->data_len;
182 skb->truesize += skb->data_len;
/* Optional frag_list: chain per-size child skbs onto the head skb. */
185 if (tcase->frag_skbs) {
186 unsigned int total_size = 0, total_true_size = 0;
187 struct sk_buff *frag_skb, *prev = NULL;
189 for (i = 0; i < tcase->nr_frag_skbs; i++) {
190 unsigned int frag_size;
192 page = alloc_page(GFP_KERNEL);
193 KUNIT_ASSERT_NOT_NULL(test, page);
195 frag_size = tcase->frag_skbs[i];
196 frag_skb = build_skb(page_address(page),
197 frag_size + shinfo_size);
198 KUNIT_ASSERT_NOT_NULL(test, frag_skb);
199 __skb_put(frag_skb, frag_size);
/* Link children: first one hangs off shinfo->frag_list, rest off ->next. */
202 prev->next = frag_skb;
204 skb_shinfo(skb)->frag_list = frag_skb;
207 total_size += frag_size;
208 total_true_size += frag_skb->truesize;
/* Account frag_list bytes in the head skb. */
211 skb->len += total_size;
212 skb->data_len += total_size;
213 skb->truesize += total_true_size;
/* GSO_BY_FRAGS: segment on the frag_skb boundaries, not a fixed MSS. */
215 if (tcase->id == GSO_TEST_GSO_BY_FRAGS)
216 skb_shinfo(skb)->gso_size = GSO_BY_FRAGS;
219 features = NETIF_F_SG | NETIF_F_HW_CSUM;
220 if (tcase->id == GSO_TEST_GSO_PARTIAL)
221 features |= NETIF_F_GSO_PARTIAL;
223 /* TODO: this should also work with SG,
224 * rather than hit BUG_ON(i >= nfrags)
226 if (tcase->id == GSO_TEST_FRAG_LIST_NON_UNIFORM)
227 features &= ~NETIF_F_SG;
/* Exercise the code under test. */
229 segs = skb_segment(skb, features);
231 KUNIT_FAIL(test, "segs error %pe", segs);
234 KUNIT_FAIL(test, "no segments");
/* Walk the segment list and check each segment against expectations. */
239 for (cur = segs, i = 0; cur; cur = next, i++) {
242 KUNIT_ASSERT_EQ(test, cur->len, sizeof(hdr) + tcase->segs[i]);
244 /* segs have skb->data pointing to the mac header */
245 KUNIT_ASSERT_PTR_EQ(test, skb_mac_header(cur), cur->data);
246 KUNIT_ASSERT_PTR_EQ(test, skb_network_header(cur), cur->data + sizeof(hdr));
248 /* header was copied to all segs */
249 KUNIT_ASSERT_EQ(test, memcmp(skb_mac_header(cur), hdr, sizeof(hdr)), 0);
251 /* last seg can be found through segs->prev pointer */
253 KUNIT_ASSERT_PTR_EQ(test, cur, last);
/* All expected segments, and no more, were produced. */
258 KUNIT_ASSERT_EQ(test, i, tcase->nr_segs);
264 /* IP tunnel flags */
266 #include <net/ip_tunnels.h>
/* One IP tunnel flags test case: a list of bitmap flag bits to set,
 * plus the expected __be16 compatibility verdict, packed value, and
 * round-tripped bit list.
 * NOTE(review): the struct's member lines are missing from this excerpt;
 * the macro below initializes .src_num/.exp_comp/.exp_num and presumably
 * .name/.src_bits/.exp_val/.exp_bits used by the runner further down.
 */
268 struct ip_tunnel_flags_test {
/* Table-entry constructor; sizes are derived from the array arguments. */
280 #define IP_TUNNEL_FLAGS_TEST(n, src, comp, eval, exp) { \
283 .src_num = ARRAY_SIZE(src), \
284 .exp_comp = (comp), \
287 .exp_num = ARRAY_SIZE(exp), \
290 /* These are __be16-compatible and can be compared as is */
291 static const u16 ip_tunnel_flags_1[] = {
293 IP_TUNNEL_STRICT_BIT,
294 IP_TUNNEL_ERSPAN_OPT_BIT,
297 /* Due to the previous flags design limitation, setting either
298 * ``IP_TUNNEL_CSUM_BIT`` (on Big Endian) or ``IP_TUNNEL_DONT_FRAGMENT_BIT``
299 * (on Little) also sets VTI/ISATAP bit. In the bitmap implementation, they
300 * correspond to ``BIT(16)``, which is bigger than ``U16_MAX``, but still is
301 * backward-compatible.
303 #ifdef __LITTLE_ENDIAN
304 #define IP_TUNNEL_CONFLICT_BIT IP_TUNNEL_DONT_FRAGMENT_BIT
306 #define IP_TUNNEL_CONFLICT_BIT IP_TUNNEL_CSUM_BIT
/* Input: just the conflicting bit... */
309 static const u16 ip_tunnel_flags_2_src[] = {
310 IP_TUNNEL_CONFLICT_BIT,
/* ...expected round-trip result: conflict bit plus the aliased ISATAP bit. */
313 static const u16 ip_tunnel_flags_2_exp[] = {
314 IP_TUNNEL_CONFLICT_BIT,
315 IP_TUNNEL_SIT_ISATAP_BIT,
318 /* Bits 17 and higher are not compatible with __be16 flags */
319 static const u16 ip_tunnel_flags_3_src[] = {
320 IP_TUNNEL_VXLAN_OPT_BIT,
/* Only the low, __be16-representable bit survives the round trip. */
326 static const u16 ip_tunnel_flags_3_exp[] = {
327 IP_TUNNEL_VXLAN_OPT_BIT,
/* Parameter table: (name, src bits, expect-be16-compatible?, expected
 * packed __be16 value, expected bits after converting back).
 */
330 static const struct ip_tunnel_flags_test ip_tunnel_flags_test[] = {
331 IP_TUNNEL_FLAGS_TEST("compat", ip_tunnel_flags_1, true,
332 cpu_to_be16(BIT(IP_TUNNEL_KEY_BIT) |
333 BIT(IP_TUNNEL_STRICT_BIT) |
334 BIT(IP_TUNNEL_ERSPAN_OPT_BIT)),
336 IP_TUNNEL_FLAGS_TEST("conflict", ip_tunnel_flags_2_src, true,
337 VTI_ISVTI, ip_tunnel_flags_2_exp),
/* "new": high bit present, so not be16-compatible; packed value keeps
 * only the representable VXLAN_OPT bit.
 */
338 IP_TUNNEL_FLAGS_TEST("new", ip_tunnel_flags_3_src, false,
339 cpu_to_be16(BIT(IP_TUNNEL_VXLAN_OPT_BIT)),
340 ip_tunnel_flags_3_exp),
/* Format the KUnit parameter description (bounded copy of the case name).
 * NOTE(review): the return-type line and braces are missing from this
 * excerpt.
 */
344 ip_tunnel_flags_test_case_to_desc(const struct ip_tunnel_flags_test *t,
347 strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
/* Generates ip_tunnel_flags_test_gen_params() for KUNIT_CASE_PARAM. */
349 KUNIT_ARRAY_PARAM(ip_tunnel_flags_test, ip_tunnel_flags_test,
350 ip_tunnel_flags_test_case_to_desc);
/* Build src/exp bitmaps from the case's bit lists, then check:
 * be16-compatibility verdict, packing to __be16, and unpacking back
 * to a bitmap equal to the expected one.
 * NOTE(review): braces are missing from this excerpt; comments only.
 */
352 static void ip_tunnel_flags_test_run(struct kunit *test)
354 const struct ip_tunnel_flags_test *t = test->param_value;
355 IP_TUNNEL_DECLARE_FLAGS(src) = { };
356 IP_TUNNEL_DECLARE_FLAGS(exp) = { };
357 IP_TUNNEL_DECLARE_FLAGS(out);
359 for (u32 j = 0; j < t->src_num; j++)
360 __set_bit(t->src_bits[j], src);
361 for (u32 j = 0; j < t->exp_num; j++)
362 __set_bit(t->exp_bits[j], exp);
/* Forward direction: bitmap -> compatibility flag and packed __be16. */
364 KUNIT_ASSERT_EQ(test, t->exp_comp,
365 ip_tunnel_flags_is_be16_compat(src));
366 KUNIT_ASSERT_EQ(test, (__force u16)t->exp_val,
367 (__force u16)ip_tunnel_flags_to_be16(src));
/* Reverse direction: __be16 -> bitmap must equal the expected bitmap. */
369 ip_tunnel_flags_from_be16(out, t->exp_val);
370 KUNIT_ASSERT_TRUE(test, __ipt_flag_op(bitmap_equal, exp, out));
/* Suite wiring: both tests are parameterized over their case tables.
 * NOTE(review): the terminating {} sentinel of net_test_cases[], the
 * closing braces and the suite .name initializer are missing from this
 * excerpt.
 */
373 static struct kunit_case net_test_cases[] = {
374 KUNIT_CASE_PARAM(gso_test_func, gso_test_gen_params),
375 KUNIT_CASE_PARAM(ip_tunnel_flags_test_run,
376 ip_tunnel_flags_test_gen_params),
380 static struct kunit_suite net_test_suite = {
382 .test_cases = net_test_cases,
384 kunit_test_suite(net_test_suite);
386 MODULE_DESCRIPTION("KUnit tests for networking core");
387 MODULE_LICENSE("GPL");