#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
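
/* The BUILD_BUG_ON() above documents the layout this helper relies on:
 * struct flow_keys (net/flow_keys.h) keeps src and dst adjacent, so a
 * single memcpy() covers both fields and the compiler may merge it into
 * one 64bit load/store on 64bit architectures.
 */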

bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int poff, nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		if (ip_is_fragment(iph))
			ip_proto = 0;
		else
			ip_proto = iph->protocol;
		iph_to_flow_copy_addrs(flow, iph);
		nhoff += iph->ihl * 4;
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
	case __constant_htons(ETH_P_8021AD):
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case __constant_htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case __constant_htons(PPP_IP):
			goto ip;
		case __constant_htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		goto again;
	default:
		break;
	}

	flow->ip_proto = ip_proto;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		__be32 *ports, _ports;

		nhoff += poff;
		ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
		if (ports)
			flow->ports = *ports;
	}

	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
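
/* Example use (illustrative only): callers dissect into a flow_keys on
 * the stack and read the flow fields from it, for instance:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		pr_debug("ip_proto %u ports %#x thoff %u\n",
 *			 keys.ip_proto, ntohl(keys.ports), keys.thoff);
 */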

static u32 hashrnd __read_mostly;
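/* hashrnd is a per-boot random key, filled in by initialize_hashrnd() at
 * late initcall time, so the rxhash, Tx hash and XPS paths below all hash
 * with the same seed.
 */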

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to a non-zero hash value
 * on success; zero indicates no valid hash.  Also sets l4_rxhash in skb
 * if the hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
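
/* Because (src, dst) and the two port halves are put into canonical order
 * before hashing, both directions of the same flow feed identical
 * (dst, src, ports) words to jhash_3words() and thus get the same rxhash.
 */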

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
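
/* The ((u64)hash * qcount) >> 32 step scales the 32bit hash uniformly
 * onto [0, qcount) without a modulo; e.g. hash 0x80000000 with qcount 8
 * selects queue 4, and qoffset then shifts the pick into the traffic
 * class's queue range.
 */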

/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff = keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
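
/* For TCP the returned offset covers the full header including options
 * (tcph->doff * 4), so a BPF program reading the payload offset can
 * truncate a capture right at the start of the application payload.
 */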

static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;

				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
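
/* XPS selection is per transmitting CPU: the map attached to
 * raw_smp_processor_id() lists the queues configured for that CPU, and
 * the same multiply-shift scaling as above picks one of them when the
 * CPU maps to more than one queue.
 */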

u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		/* cache the queue we just picked, not the stale one */
		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);
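
/* Queue choice precedence: a still-valid index cached on the socket,
 * then an XPS match for the current CPU, then the skb_tx_hash() fallback
 * over the device's whole queue range.
 */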

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));

	return 0;
}

late_initcall_sync(initialize_hashrnd);