1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 /* COMMON Applications Kept Enhanced (CAKE) discipline
12 * The CAKE Principles:
13 * (or, how to have your cake and eat it too)
15 * This is a combination of several shaping, AQM and FQ techniques into one
16 * easy-to-use package:
18 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
19 * equipment and bloated MACs. This operates in deficit mode (as in sch_fq),
20 * eliminating the need for any sort of burst parameter (eg. token bucket
21 * depth). Burst support is limited to that necessary to overcome scheduling
24 * - A Diffserv-aware priority queue, giving more priority to certain classes,
25 * up to a specified fraction of bandwidth. Above that bandwidth threshold,
26 * the priority is reduced to avoid starving other tins.
28 * - Each priority tin has a separate Flow Queue system, to isolate traffic
29 * flows from each other. This prevents a burst on one flow from increasing
30 * the delay to another. Flows are distributed to queues using a
31 * set-associative hash function.
33 * - Each queue is actively managed by Cobalt, which is a combination of the
34 * Codel and Blue AQM algorithms. This serves flows fairly, and signals
35 * congestion early via ECN (if available) and/or packet drops, to keep
36 * latency low. The codel parameters are auto-tuned based on the bandwidth
37 * setting, as is necessary at low bandwidths.
39 * The configuration parameters are kept deliberately simple for ease of use.
40 * Everything has sane defaults. Complete generality of configuration is *not*
43 * The priority queue operates according to a weighted DRR scheme, combined with
44 * a bandwidth tracker which reuses the shaper logic to detect which side of the
45 * bandwidth sharing threshold the tin is operating on. This determines whether a
46 * priority-based weight (high) or a bandwidth-based weight (low) is used for
47 * that tin in the current pass.
49 * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
50 * granted us permission to leverage.
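 *
 * Purely as an illustration (not part of the original file), a typical
 * deployment might enable the pieces described above from userspace with
 * something like:
 *
 *   tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv4 nat
 *
 * i.e. the shaper, the four-tin Diffserv mapping and NAT-aware flow hashing.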
53 #include <linux/module.h>
54 #include <linux/types.h>
55 #include <linux/kernel.h>
56 #include <linux/jiffies.h>
57 #include <linux/string.h>
59 #include <linux/errno.h>
60 #include <linux/init.h>
61 #include <linux/skbuff.h>
62 #include <linux/jhash.h>
63 #include <linux/slab.h>
64 #include <linux/vmalloc.h>
65 #include <linux/reciprocal_div.h>
66 #include <net/netlink.h>
67 #include <linux/if_vlan.h>
68 #include <net/pkt_sched.h>
69 #include <net/pkt_cls.h>
71 #include <net/flow_dissector.h>
73 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
74 #include <net/netfilter/nf_conntrack_core.h>
77 #define CAKE_SET_WAYS (8)
78 #define CAKE_MAX_TINS (8)
79 #define CAKE_QUEUES (1024)
80 #define CAKE_FLOW_MASK 63
81 #define CAKE_FLOW_NAT_FLAG 64
83 /* struct cobalt_params - contains codel and blue parameters
84 * @interval: codel initial drop interval (controls the signalling rate)
85 * @target: maximum persistent sojourn time & blue update rate
86 * @mtu_time: serialisation delay of maximum-size packet
87 * @p_inc: increment of blue drop probability (0.32 fxp)
88 * @p_dec: decrement of blue drop probability (0.32 fxp)
90 struct cobalt_params {
98 /* struct cobalt_vars - contains codel and blue variables
99 * @count: codel dropping frequency
100 * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
101 * @drop_next: time to drop next packet, or when we dropped last
102 * @blue_timer: Blue time to next drop
103 * @p_drop: BLUE drop probability (0.32 fxp)
104 * @dropping: set if in dropping state
105 * @ecn_marked: set if the packet was ECN marked
120 CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
126 /* this stuff is all needed per-flow at dequeue time */
127 struct sk_buff *head;
128 struct sk_buff *tail;
129 struct list_head flowchain;
132 struct cobalt_vars cvars;
133 u16 srchost; /* index into cake_host table */
136 }; /* please try to keep this structure <= 64 bytes */
145 struct cake_heap_entry {
149 struct cake_tin_data {
150 struct cake_flow flows[CAKE_QUEUES];
151 u32 backlogs[CAKE_QUEUES];
152 u32 tags[CAKE_QUEUES]; /* for set association */
153 u16 overflow_idx[CAKE_QUEUES];
154 struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
157 struct cobalt_params cparams;
160 u16 sparse_flow_count;
161 u16 decaying_flow_count;
162 u16 unresponsive_flow_count;
166 struct list_head new_flows;
167 struct list_head old_flows;
168 struct list_head decaying_flows;
170 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
171 ktime_t time_next_packet;
176 u16 tin_quantum_prio;
177 u16 tin_quantum_band;
188 /* moving averages */
193 /* hash function stats */
198 }; /* number of tins is small, so size of this struct doesn't matter much */
200 struct cake_sched_data {
201 struct tcf_proto __rcu *filter_list; /* optional external classifier */
202 struct tcf_block *block;
203 struct cake_tin_data *tins;
205 struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
206 u16 overflow_timeout;
214 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
216 ktime_t time_next_packet;
217 ktime_t failsafe_next_packet;
226 /* resource tracking */
230 u32 buffer_config_limit;
232 /* indices for dequeue */
236 struct qdisc_watchdog watchdog;
240 /* bandwidth capacity estimate */
241 ktime_t last_packet_time;
242 ktime_t avg_window_begin;
243 u64 avg_packet_interval;
244 u64 avg_window_bytes;
245 u64 avg_peak_bandwidth;
246 ktime_t last_reconfig_time;
248 /* packet length stats */
257 CAKE_FLAG_OVERHEAD = BIT(0),
258 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
259 CAKE_FLAG_INGRESS = BIT(2),
260 CAKE_FLAG_WASH = BIT(3),
261 CAKE_FLAG_SPLIT_GSO = BIT(4)
264 /* COBALT operates the Codel and BLUE algorithms in parallel, in order to
265 * obtain the best features of each. Codel is excellent on flows which
266 * respond to congestion signals in a TCP-like way. BLUE is more effective on
267 * unresponsive flows.
270 struct cobalt_skb_cb {
271 ktime_t enqueue_time;
275 static u64 us_to_ns(u64 us)
277 return us * NSEC_PER_USEC;
280 static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
282 qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
283 return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
286 static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
288 return get_cobalt_cb(skb)->enqueue_time;
291 static void cobalt_set_enqueue_time(struct sk_buff *skb,
294 get_cobalt_cb(skb)->enqueue_time = now;
297 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
299 /* Diffserv lookup tables */
301 static const u8 precedence[] = {
302 0, 0, 0, 0, 0, 0, 0, 0,
303 1, 1, 1, 1, 1, 1, 1, 1,
304 2, 2, 2, 2, 2, 2, 2, 2,
305 3, 3, 3, 3, 3, 3, 3, 3,
306 4, 4, 4, 4, 4, 4, 4, 4,
307 5, 5, 5, 5, 5, 5, 5, 5,
308 6, 6, 6, 6, 6, 6, 6, 6,
309 7, 7, 7, 7, 7, 7, 7, 7,
312 static const u8 diffserv8[] = {
313 2, 5, 1, 2, 4, 2, 2, 2,
314 0, 2, 1, 2, 1, 2, 1, 2,
315 5, 2, 4, 2, 4, 2, 4, 2,
316 3, 2, 3, 2, 3, 2, 3, 2,
317 6, 2, 3, 2, 3, 2, 3, 2,
318 6, 2, 2, 2, 6, 2, 6, 2,
319 7, 2, 2, 2, 2, 2, 2, 2,
320 7, 2, 2, 2, 2, 2, 2, 2,
323 static const u8 diffserv4[] = {
324 0, 2, 0, 0, 2, 0, 0, 0,
325 1, 0, 0, 0, 0, 0, 0, 0,
326 2, 0, 2, 0, 2, 0, 2, 0,
327 2, 0, 2, 0, 2, 0, 2, 0,
328 3, 0, 2, 0, 2, 0, 2, 0,
329 3, 0, 0, 0, 3, 0, 3, 0,
330 3, 0, 0, 0, 0, 0, 0, 0,
331 3, 0, 0, 0, 0, 0, 0, 0,
334 static const u8 diffserv3[] = {
335 0, 0, 0, 0, 2, 0, 0, 0,
336 1, 0, 0, 0, 0, 0, 0, 0,
337 0, 0, 0, 0, 0, 0, 0, 0,
338 0, 0, 0, 0, 0, 0, 0, 0,
339 0, 0, 0, 0, 0, 0, 0, 0,
340 0, 0, 0, 0, 2, 0, 2, 0,
341 2, 0, 0, 0, 0, 0, 0, 0,
342 2, 0, 0, 0, 0, 0, 0, 0,
345 static const u8 besteffort[] = {
346 0, 0, 0, 0, 0, 0, 0, 0,
347 0, 0, 0, 0, 0, 0, 0, 0,
348 0, 0, 0, 0, 0, 0, 0, 0,
349 0, 0, 0, 0, 0, 0, 0, 0,
350 0, 0, 0, 0, 0, 0, 0, 0,
351 0, 0, 0, 0, 0, 0, 0, 0,
352 0, 0, 0, 0, 0, 0, 0, 0,
353 0, 0, 0, 0, 0, 0, 0, 0,
356 /* tin priority order for stats dumping */
358 static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
359 static const u8 bulk_order[] = {1, 0, 2, 3};
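/* Reading the tables above: each maps the 64 possible DSCP values (eight per
 * row, in ascending order) to a tin index. For example, in diffserv4 DSCP 46
 * (EF) maps to tin 3 and DSCP 8 (CS1) to tin 1, while unlisted codepoints
 * fall through to the best-effort tin 0.
 */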
361 #define REC_INV_SQRT_CACHE (16)
362 static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
364 /* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
365 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
367 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
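 *
 * Rough worked example: starting from invsqrt ~= 1.0 (0xFFFFFFFF) with
 * count = 2, a single step gives (1/2) * (3 - 2 * 1) = 0.5, whereas the true
 * value of 1/sqrt(2) is ~0.707; hence the precomputed cache below for small
 * counts, where one step alone is too inaccurate.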
370 static void cobalt_newton_step(struct cobalt_vars *vars)
372 u32 invsqrt, invsqrt2;
375 invsqrt = vars->rec_inv_sqrt;
376 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
377 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
379 val >>= 2; /* avoid overflow in following multiply */
380 val = (val * invsqrt) >> (32 - 2 + 1);
382 vars->rec_inv_sqrt = val;
385 static void cobalt_invsqrt(struct cobalt_vars *vars)
387 if (vars->count < REC_INV_SQRT_CACHE)
388 vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
390 cobalt_newton_step(vars);
393 /* There is a big difference in timing between the accurate values placed in
394 * the cache and the approximations given by a single Newton step for small
395 * count values, particularly when stepping from count 1 to 2 or vice versa.
396 * Above 16, a single Newton step gives sufficient accuracy in either
397 * direction, given the precision stored.
399 * The magnitude of the error when stepping up to count 2 is such as to give
400 * the value that *should* have been produced at count 4.
403 static void cobalt_cache_init(void)
405 struct cobalt_vars v;
407 memset(&v, 0, sizeof(v));
408 v.rec_inv_sqrt = ~0U;
409 cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
411 for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
412 cobalt_newton_step(&v);
413 cobalt_newton_step(&v);
414 cobalt_newton_step(&v);
415 cobalt_newton_step(&v);
417 cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
421 static void cobalt_vars_init(struct cobalt_vars *vars)
423 memset(vars, 0, sizeof(*vars));
425 if (!cobalt_rec_inv_sqrt_cache[0]) {
427 cobalt_rec_inv_sqrt_cache[0] = ~0;
431 /* CoDel control_law is t + interval/sqrt(count)
432 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
433 * both sqrt() and divide operation.
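 *
 * Rough worked example: with interval = 100 ms and count = 4, rec_inv_sqrt
 * holds ~0.5 in Q0.32 (0x80000000), so reciprocal_scale(interval,
 * rec_inv_sqrt) = (interval * rec_inv_sqrt) >> 32 schedules the next signal
 * ~50 ms after drop_next, i.e. interval / sqrt(count).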
435 static ktime_t cobalt_control(ktime_t t,
439 return ktime_add_ns(t, reciprocal_scale(interval,
443 /* Call this when a packet had to be dropped due to queue overflow. Returns
444 * true if the BLUE state was quiescent before but active after this call.
446 static bool cobalt_queue_full(struct cobalt_vars *vars,
447 struct cobalt_params *p,
452 if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
454 vars->p_drop += p->p_inc;
455 if (vars->p_drop < p->p_inc)
457 vars->blue_timer = now;
459 vars->dropping = true;
460 vars->drop_next = now;
467 /* Call this when the queue was serviced but turned out to be empty. Returns
468 * true if the BLUE state was active before but quiescent after this call.
470 static bool cobalt_queue_empty(struct cobalt_vars *vars,
471 struct cobalt_params *p,
477 ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
478 if (vars->p_drop < p->p_dec)
481 vars->p_drop -= p->p_dec;
482 vars->blue_timer = now;
483 down = !vars->p_drop;
485 vars->dropping = false;
487 if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
489 cobalt_invsqrt(vars);
490 vars->drop_next = cobalt_control(vars->drop_next,
498 /* Call this with a freshly dequeued packet for possible congestion marking.
499 * Returns true as an instruction to drop the packet, false for delivery.
501 static bool cobalt_should_drop(struct cobalt_vars *vars,
502 struct cobalt_params *p,
507 bool next_due, over_target, drop = false;
511 /* The 'schedule' variable records, in its sign, whether 'now' is before or
512 * after 'drop_next'. This allows 'drop_next' to be updated before the next
513 * scheduling decision is actually branched, without destroying that
514 * information. Similarly, the first 'schedule' value calculated is preserved
515 * in the boolean 'next_due'.
517 * As for 'drop_next', we take advantage of the fact that 'interval' is both
518 * the delay between first exceeding 'target' and the first signalling event,
519 * *and* the scaling factor for the signalling frequency. It's therefore very
520 * natural to use a single mechanism for both purposes, and eliminates a
521 * significant amount of reference Codel's spaghetti code. To help with this,
522 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
523 * as possible to 1.0 in fixed-point.
526 sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
527 schedule = ktime_sub(now, vars->drop_next);
528 over_target = sojourn > p->target &&
529 sojourn > p->mtu_time * bulk_flows * 2 &&
530 sojourn > p->mtu_time * 4;
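/* For illustration: at 1 Mbit/s a 1514-byte packet has an mtu_time of
 * ~12 ms, so with a single bulk flow a packet only counts as over target
 * once its sojourn exceeds ~48 ms (4 * mtu_time), even though the
 * configured target is far lower.
 */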
531 next_due = vars->count && ktime_to_ns(schedule) >= 0;
533 vars->ecn_marked = false;
536 if (!vars->dropping) {
537 vars->dropping = true;
538 vars->drop_next = cobalt_control(now,
544 } else if (vars->dropping) {
545 vars->dropping = false;
548 if (next_due && vars->dropping) {
549 /* Use ECN mark if possible, otherwise drop */
550 drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
555 cobalt_invsqrt(vars);
556 vars->drop_next = cobalt_control(vars->drop_next,
559 schedule = ktime_sub(now, vars->drop_next);
563 cobalt_invsqrt(vars);
564 vars->drop_next = cobalt_control(vars->drop_next,
567 schedule = ktime_sub(now, vars->drop_next);
568 next_due = vars->count && ktime_to_ns(schedule) >= 0;
572 /* Simple BLUE implementation. Lack of ECN is deliberate. */
574 drop |= (prandom_u32() < vars->p_drop);
576 /* Overload the drop_next field as an activity timeout */
578 vars->drop_next = ktime_add_ns(now, p->interval);
579 else if (ktime_to_ns(schedule) > 0 && !drop)
580 vars->drop_next = now;
585 static void cake_update_flowkeys(struct flow_keys *keys,
586 const struct sk_buff *skb)
588 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
589 struct nf_conntrack_tuple tuple = {};
590 bool rev = !skb->_nfct;
592 if (tc_skb_protocol(skb) != htons(ETH_P_IP))
595 if (!nf_ct_get_tuple_skb(&tuple, skb))
598 keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
599 keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
601 if (keys->ports.ports) {
602 keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
603 keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
608 /* Cake has several flow modes that set multiple bits at once. A check for
609 * dual-src or dual-dst mode below therefore also matches triple-isolate mode.
612 static bool cake_dsrc(int flow_mode)
614 return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
617 static bool cake_ddst(int flow_mode)
619 return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
622 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
623 int flow_mode, u16 flow_override, u16 host_override)
625 u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
626 u16 reduced_hash, srchost_idx, dsthost_idx;
627 struct flow_keys keys, host_keys;
629 if (unlikely(flow_mode == CAKE_FLOW_NONE))
632 /* If both overrides are set we can skip packet dissection entirely */
633 if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
634 (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
637 skb_flow_dissect_flow_keys(skb, &keys,
638 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
640 if (flow_mode & CAKE_FLOW_NAT_FLAG)
641 cake_update_flowkeys(&keys, skb);
643 /* flow_hash_from_keys() sorts the addresses by value, so we have
644 * to preserve their order in a separate data structure to treat
645 * src and dst host addresses as independently selectable.
648 host_keys.ports.ports = 0;
649 host_keys.basic.ip_proto = 0;
650 host_keys.keyid.keyid = 0;
651 host_keys.tags.flow_label = 0;
653 switch (host_keys.control.addr_type) {
654 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
655 host_keys.addrs.v4addrs.src = 0;
656 dsthost_hash = flow_hash_from_keys(&host_keys);
657 host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
658 host_keys.addrs.v4addrs.dst = 0;
659 srchost_hash = flow_hash_from_keys(&host_keys);
662 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
663 memset(&host_keys.addrs.v6addrs.src, 0,
664 sizeof(host_keys.addrs.v6addrs.src));
665 dsthost_hash = flow_hash_from_keys(&host_keys);
666 host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
667 memset(&host_keys.addrs.v6addrs.dst, 0,
668 sizeof(host_keys.addrs.v6addrs.dst));
669 srchost_hash = flow_hash_from_keys(&host_keys);
677 /* This *must* be after the above switch, since as a
678 * side-effect it sorts the src and dst addresses.
680 if (flow_mode & CAKE_FLOW_FLOWS)
681 flow_hash = flow_hash_from_keys(&keys);
685 flow_hash = flow_override - 1;
687 dsthost_hash = host_override - 1;
688 srchost_hash = host_override - 1;
691 if (!(flow_mode & CAKE_FLOW_FLOWS)) {
692 if (flow_mode & CAKE_FLOW_SRC_IP)
693 flow_hash ^= srchost_hash;
695 if (flow_mode & CAKE_FLOW_DST_IP)
696 flow_hash ^= dsthost_hash;
699 reduced_hash = flow_hash % CAKE_QUEUES;
701 /* set-associative hashing */
702 /* fast path if no hash collision (direct lookup succeeds) */
703 if (likely(q->tags[reduced_hash] == flow_hash &&
704 q->flows[reduced_hash].set)) {
707 u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
708 u32 outer_hash = reduced_hash - inner_hash;
709 bool allocate_src = false;
710 bool allocate_dst = false;
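/* Worked example of the set layout: the CAKE_QUEUES (1024) queues form 128
 * sets of CAKE_SET_WAYS (8) ways; e.g. reduced_hash = 523 gives
 * inner_hash = 3 and outer_hash = 520, so queues 520..527 are probed,
 * starting at index 523 and wrapping within the set.
 */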
713 /* check if any active queue in the set is reserved for
716 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
717 i++, k = (k + 1) % CAKE_SET_WAYS) {
718 if (q->tags[outer_hash + k] == flow_hash) {
722 if (!q->flows[outer_hash + k].set) {
723 /* need to increment host refcnts */
724 allocate_src = cake_dsrc(flow_mode);
725 allocate_dst = cake_ddst(flow_mode);
732 /* no queue is reserved for this flow, look for an
735 for (i = 0; i < CAKE_SET_WAYS;
736 i++, k = (k + 1) % CAKE_SET_WAYS) {
737 if (!q->flows[outer_hash + k].set) {
739 allocate_src = cake_dsrc(flow_mode);
740 allocate_dst = cake_ddst(flow_mode);
745 /* With no empty queues, default to the original
746 * queue, accept the collision, update the host tags.
749 q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
750 q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
751 allocate_src = cake_dsrc(flow_mode);
752 allocate_dst = cake_ddst(flow_mode);
754 /* reserve queue for future packets in same flow */
755 reduced_hash = outer_hash + k;
756 q->tags[reduced_hash] = flow_hash;
759 srchost_idx = srchost_hash % CAKE_QUEUES;
760 inner_hash = srchost_idx % CAKE_SET_WAYS;
761 outer_hash = srchost_idx - inner_hash;
762 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
763 i++, k = (k + 1) % CAKE_SET_WAYS) {
764 if (q->hosts[outer_hash + k].srchost_tag ==
768 for (i = 0; i < CAKE_SET_WAYS;
769 i++, k = (k + 1) % CAKE_SET_WAYS) {
770 if (!q->hosts[outer_hash + k].srchost_refcnt)
773 q->hosts[outer_hash + k].srchost_tag = srchost_hash;
775 srchost_idx = outer_hash + k;
776 q->hosts[srchost_idx].srchost_refcnt++;
777 q->flows[reduced_hash].srchost = srchost_idx;
781 dsthost_idx = dsthost_hash % CAKE_QUEUES;
782 inner_hash = dsthost_idx % CAKE_SET_WAYS;
783 outer_hash = dsthost_idx - inner_hash;
784 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
785 i++, k = (k + 1) % CAKE_SET_WAYS) {
786 if (q->hosts[outer_hash + k].dsthost_tag ==
790 for (i = 0; i < CAKE_SET_WAYS;
791 i++, k = (k + 1) % CAKE_SET_WAYS) {
792 if (!q->hosts[outer_hash + k].dsthost_refcnt)
795 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
797 dsthost_idx = outer_hash + k;
798 q->hosts[dsthost_idx].dsthost_refcnt++;
799 q->flows[reduced_hash].dsthost = dsthost_idx;
806 /* helper functions: might be changed when/if skbs use a standard list_head */
807 /* remove one skb from head of slot queue */
809 static struct sk_buff *dequeue_head(struct cake_flow *flow)
811 struct sk_buff *skb = flow->head;
814 flow->head = skb->next;
815 skb_mark_not_on_list(skb);
821 /* add skb to flow queue (tail add) */
823 static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
828 flow->tail->next = skb;
833 static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
836 unsigned int offset = skb_network_offset(skb);
839 iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
844 if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
845 return skb_header_pointer(skb, offset + iph->ihl * 4,
846 sizeof(struct ipv6hdr), buf);
848 else if (iph->version == 4)
851 else if (iph->version == 6)
852 return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
858 static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
859 void *buf, unsigned int bufsize)
861 unsigned int offset = skb_network_offset(skb);
862 const struct ipv6hdr *ipv6h;
863 const struct tcphdr *tcph;
864 const struct iphdr *iph;
865 struct ipv6hdr _ipv6h;
868 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
873 if (ipv6h->version == 4) {
874 iph = (struct iphdr *)ipv6h;
875 offset += iph->ihl * 4;
877 /* special-case 6in4 tunnelling, as that is a common way to get
878 * v6 connectivity in the home
880 if (iph->protocol == IPPROTO_IPV6) {
881 ipv6h = skb_header_pointer(skb, offset,
882 sizeof(_ipv6h), &_ipv6h);
884 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
887 offset += sizeof(struct ipv6hdr);
889 } else if (iph->protocol != IPPROTO_TCP) {
893 } else if (ipv6h->version == 6) {
894 if (ipv6h->nexthdr != IPPROTO_TCP)
897 offset += sizeof(struct ipv6hdr);
902 tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
906 return skb_header_pointer(skb, offset,
907 min(__tcp_hdrlen(tcph), bufsize), buf);
910 static const void *cake_get_tcpopt(const struct tcphdr *tcph,
911 int code, int *oplen)
913 /* inspired by tcp_parse_options in tcp_input.c */
914 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
915 const u8 *ptr = (const u8 *)(tcph + 1);
921 if (opcode == TCPOPT_EOL)
923 if (opcode == TCPOPT_NOP) {
928 if (opsize < 2 || opsize > length)
931 if (opcode == code) {
943 /* Compare two SACK sequences. A sequence is considered greater if it SACKs more
944 * bytes than the other. Where each sequence ACKs bytes that the other
945 * doesn't, A is considered greater. DSACKs in A also make A considered
946 * greater.
948 * @return -1, 0 or 1 as normal compare functions
950 static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
951 const struct tcphdr *tcph_b)
953 const struct tcp_sack_block_wire *sack_a, *sack_b;
954 u32 ack_seq_a = ntohl(tcph_a->ack_seq);
955 u32 bytes_a = 0, bytes_b = 0;
956 int oplen_a, oplen_b;
959 sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
960 sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
962 /* pointers point to option contents */
963 oplen_a -= TCPOLEN_SACK_BASE;
964 oplen_b -= TCPOLEN_SACK_BASE;
966 if (sack_a && oplen_a >= sizeof(*sack_a) &&
967 (!sack_b || oplen_b < sizeof(*sack_b)))
969 else if (sack_b && oplen_b >= sizeof(*sack_b) &&
970 (!sack_a || oplen_a < sizeof(*sack_a)))
972 else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
973 (!sack_b || oplen_b < sizeof(*sack_b)))
976 while (oplen_a >= sizeof(*sack_a)) {
977 const struct tcp_sack_block_wire *sack_tmp = sack_b;
978 u32 start_a = get_unaligned_be32(&sack_a->start_seq);
979 u32 end_a = get_unaligned_be32(&sack_a->end_seq);
980 int oplen_tmp = oplen_b;
983 /* DSACK; always considered greater to prevent dropping */
984 if (before(start_a, ack_seq_a))
987 bytes_a += end_a - start_a;
989 while (oplen_tmp >= sizeof(*sack_tmp)) {
990 u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
991 u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
993 /* first time through we count the total size */
995 bytes_b += end_b - start_b;
997 if (!after(start_b, start_a) && !before(end_b, end_a)) {
1002 oplen_tmp -= sizeof(*sack_tmp);
1009 oplen_a -= sizeof(*sack_a);
1014 /* If we made it this far, all ranges SACKed by A are covered by B, so
1015 * either the SACKs are equal, or B SACKs more bytes.
1017 return bytes_b > bytes_a ? 1 : 0;
1020 static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
1021 u32 *tsval, u32 *tsecr)
1026 ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
1028 if (ptr && opsize == TCPOLEN_TIMESTAMP) {
1029 *tsval = get_unaligned_be32(ptr);
1030 *tsecr = get_unaligned_be32(ptr + 4);
1034 static bool cake_tcph_may_drop(const struct tcphdr *tcph,
1035 u32 tstamp_new, u32 tsecr_new)
1037 /* inspired by tcp_parse_options in tcp_input.c */
1038 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
1039 const u8 *ptr = (const u8 *)(tcph + 1);
1042 /* 3 reserved flags must be unset to avoid future breakage
1044 * ECE/CWR are handled separately
1045 * All other flags URG/PSH/RST/SYN/FIN must be unset
1046 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
1047 * 0x00C00000 = CWR/ECE (handled separately)
1048 * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
1050 if (((tcp_flag_word(tcph) &
1051 cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
1054 while (length > 0) {
1055 int opcode = *ptr++;
1058 if (opcode == TCPOPT_EOL)
1060 if (opcode == TCPOPT_NOP) {
1065 if (opsize < 2 || opsize > length)
1069 case TCPOPT_MD5SIG: /* doesn't influence state */
1072 case TCPOPT_SACK: /* stricter checking performed later */
1073 if (opsize % 8 != 2)
1077 case TCPOPT_TIMESTAMP:
1078 /* only drop timestamps lower than new */
1079 if (opsize != TCPOLEN_TIMESTAMP)
1081 tstamp = get_unaligned_be32(ptr);
1082 tsecr = get_unaligned_be32(ptr + 4);
1083 if (after(tstamp, tstamp_new) ||
1084 after(tsecr, tsecr_new))
1088 case TCPOPT_MSS: /* these should only be set on SYN */
1090 case TCPOPT_SACK_PERM:
1091 case TCPOPT_FASTOPEN:
1093 default: /* don't drop if any unknown options are present */
1104 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
1105 struct cake_flow *flow)
1107 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
1108 struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
1109 struct sk_buff *skb_check, *skb_prev = NULL;
1110 const struct ipv6hdr *ipv6h, *ipv6h_check;
1111 unsigned char _tcph[64], _tcph_check[64];
1112 const struct tcphdr *tcph, *tcph_check;
1113 const struct iphdr *iph, *iph_check;
1114 struct ipv6hdr _iph, _iph_check;
1115 const struct sk_buff *skb;
1116 int seglen, num_found = 0;
1117 u32 tstamp = 0, tsecr = 0;
1118 __be32 elig_flags = 0;
1121 /* no other possible ACKs to filter */
1122 if (flow->head == flow->tail)
1126 tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
1127 iph = cake_get_iphdr(skb, &_iph);
1131 cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
1133 /* the 'triggering' packet need only have the ACK flag set.
1134 * also check that SYN is not set, as there won't be any previous ACKs.
1136 if ((tcp_flag_word(tcph) &
1137 (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
1140 /* the 'triggering' ACK is at the tail of the queue, we have already
1141 * returned if it is the only packet in the flow. loop through the rest
1142 * of the queue looking for pure ACKs with the same 5-tuple as the
1145 for (skb_check = flow->head;
1146 skb_check && skb_check != skb;
1147 skb_prev = skb_check, skb_check = skb_check->next) {
1148 iph_check = cake_get_iphdr(skb_check, &_iph_check);
1149 tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
1150 sizeof(_tcph_check));
1152 /* only TCP packets with matching 5-tuple are eligible, and only
1155 if (!tcph_check || iph->version != iph_check->version ||
1156 tcph_check->source != tcph->source ||
1157 tcph_check->dest != tcph->dest)
1160 if (iph_check->version == 4) {
1161 if (iph_check->saddr != iph->saddr ||
1162 iph_check->daddr != iph->daddr)
1165 seglen = ntohs(iph_check->tot_len) -
1166 (4 * iph_check->ihl);
1167 } else if (iph_check->version == 6) {
1168 ipv6h = (struct ipv6hdr *)iph;
1169 ipv6h_check = (struct ipv6hdr *)iph_check;
1171 if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
1172 ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
1175 seglen = ntohs(ipv6h_check->payload_len);
1177 WARN_ON(1); /* shouldn't happen */
1181 /* If the ECE/CWR flags changed from the previous eligible
1182 * packet in the same flow, we should no longer be dropping that
1183 * previous packet as this would lose information.
1185 if (elig_ack && (tcp_flag_word(tcph_check) &
1186 (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
1188 elig_ack_prev = NULL;
1192 /* Check TCP options and flags, don't drop ACKs with segment
1193 * data, and don't drop ACKs with a higher cumulative ACK
1194 * counter than the triggering packet. Check ACK seqno here to
1195 * avoid parsing SACK options of packets we are going to exclude
1198 if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
1199 (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
1200 after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
1203 /* Check SACK options. The triggering packet must SACK more data
1204 * than the ACK under consideration, or SACK the same range but
1205 * have a larger cumulative ACK counter. The latter is a
1206 * pathological case, but is contained in the following check
1207 * anyway, just to be safe.
1209 sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
1211 if (sack_comp < 0 ||
1212 (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
1216 /* At this point we have found an eligible pure ACK to drop; if
1217 * we are in aggressive mode, we are done. Otherwise, keep
1218 * searching unless this is the second eligible ACK we
1221 * Since we want to drop the ACK closest to the head of the queue,
1222 * save the first eligible ACK we find, even if we need to loop
1226 elig_ack = skb_check;
1227 elig_ack_prev = skb_prev;
1228 elig_flags = (tcp_flag_word(tcph_check)
1229 & (TCP_FLAG_ECE | TCP_FLAG_CWR));
1232 if (num_found++ > 0)
1236 /* We made it through the queue without finding two eligible ACKs. If
1237 * we found a single eligible ACK we can drop it in aggressive mode if
1238 * we can guarantee that this does not interfere with ECN flag
1239 * information. We ensure this by dropping it only if the enqueued
1240 * packet is consecutive with the eligible ACK, and their flags match.
1242 if (elig_ack && aggressive && elig_ack->next == skb &&
1243 (elig_flags == (tcp_flag_word(tcph) &
1244 (TCP_FLAG_ECE | TCP_FLAG_CWR))))
1251 elig_ack_prev->next = elig_ack->next;
1253 flow->head = elig_ack->next;
1255 skb_mark_not_on_list(elig_ack);
1260 static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
1262 avg -= avg >> shift;
1263 avg += sample >> shift;
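/* i.e. roughly avg <- avg * (1 - 2^-shift) + sample * 2^-shift; a shift of 8
 * weights each new sample at 1/256 of the running average.
 */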
1267 static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
1269 if (q->rate_flags & CAKE_FLAG_OVERHEAD)
1272 if (q->max_netlen < len)
1273 q->max_netlen = len;
1274 if (q->min_netlen > len)
1275 q->min_netlen = len;
1277 len += q->rate_overhead;
1279 if (len < q->rate_mpu)
1282 if (q->atm_mode == CAKE_ATM_ATM) {
1286 } else if (q->atm_mode == CAKE_ATM_PTM) {
1287 /* Add one byte per 64 bytes or part thereof.
1288 * This is conservative and easier to calculate than the
1291 len += (len + 63) / 64;
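/* e.g. an adjusted length of 1500 bytes gains (1500 + 63) / 64 = 24 bytes
 * here, slightly more than the exact 65/64 expansion of ~23.4 bytes. (The
 * ATM branch above instead rounds the length up to whole 53-byte cells
 * carrying 48 payload bytes each.)
 */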
1294 if (q->max_adjlen < len)
1295 q->max_adjlen = len;
1296 if (q->min_adjlen > len)
1297 q->min_adjlen = len;
1302 static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
1304 const struct skb_shared_info *shinfo = skb_shinfo(skb);
1305 unsigned int hdr_len, last_len = 0;
1306 u32 off = skb_network_offset(skb);
1307 u32 len = qdisc_pkt_len(skb);
1310 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
1312 if (!shinfo->gso_size)
1313 return cake_calc_overhead(q, len, off);
1315 /* borrowed from qdisc_pkt_len_init() */
1316 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
1318 /* + transport layer */
1319 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
1321 const struct tcphdr *th;
1322 struct tcphdr _tcphdr;
1324 th = skb_header_pointer(skb, skb_transport_offset(skb),
1325 sizeof(_tcphdr), &_tcphdr);
1327 hdr_len += __tcp_hdrlen(th);
1329 struct udphdr _udphdr;
1331 if (skb_header_pointer(skb, skb_transport_offset(skb),
1332 sizeof(_udphdr), &_udphdr))
1333 hdr_len += sizeof(struct udphdr);
1336 if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
1337 segs = DIV_ROUND_UP(skb->len - hdr_len,
1340 segs = shinfo->gso_segs;
1342 len = shinfo->gso_size + hdr_len;
1343 last_len = skb->len - shinfo->gso_size * (segs - 1);
1345 return (cake_calc_overhead(q, len, off) * (segs - 1) +
1346 cake_calc_overhead(q, last_len, off));
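/* The configured overhead is thus charged once per segment rather than once
 * per GSO super-packet; e.g. a 4-segment aggregate with 18 bytes of overhead
 * is billed an extra 72 bytes, matching what would appear on the wire once
 * segmented.
 */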
1349 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
1351 struct cake_heap_entry ii = q->overflow_heap[i];
1352 struct cake_heap_entry jj = q->overflow_heap[j];
1354 q->overflow_heap[i] = jj;
1355 q->overflow_heap[j] = ii;
1357 q->tins[ii.t].overflow_idx[ii.b] = j;
1358 q->tins[jj.t].overflow_idx[jj.b] = i;
1361 static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
1363 struct cake_heap_entry ii = q->overflow_heap[i];
1365 return q->tins[ii.t].backlogs[ii.b];
1368 static void cake_heapify(struct cake_sched_data *q, u16 i)
1370 static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
1371 u32 mb = cake_heap_get_backlog(q, i);
1379 u32 lb = cake_heap_get_backlog(q, l);
1388 u32 rb = cake_heap_get_backlog(q, r);
1397 cake_heap_swap(q, i, m);
1405 static void cake_heapify_up(struct cake_sched_data *q, u16 i)
1407 while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
1408 u16 p = (i - 1) >> 1;
1409 u32 ib = cake_heap_get_backlog(q, i);
1410 u32 pb = cake_heap_get_backlog(q, p);
1413 cake_heap_swap(q, i, p);
1421 static int cake_advance_shaper(struct cake_sched_data *q,
1422 struct cake_tin_data *b,
1423 struct sk_buff *skb,
1424 ktime_t now, bool drop)
1426 u32 len = get_cobalt_cb(skb)->adjusted_len;
1428 /* charge packet bandwidth to this tin
1429 * and to the global shaper.
1432 u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
1433 u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
1434 u64 failsafe_dur = global_dur + (global_dur >> 1);
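/* Rough example: at 100 Mbit/s the global shaper encodes ~80 ns per byte, so
 * a 1514-byte packet advances time_next_packet by ~121 us and the failsafe
 * clock by ~182 us (the extra 50% margin computed above).
 */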
1436 if (ktime_before(b->time_next_packet, now))
1437 b->time_next_packet = ktime_add_ns(b->time_next_packet,
1440 else if (ktime_before(b->time_next_packet,
1441 ktime_add_ns(now, tin_dur)))
1442 b->time_next_packet = ktime_add_ns(now, tin_dur);
1444 q->time_next_packet = ktime_add_ns(q->time_next_packet,
1447 q->failsafe_next_packet = \
1448 ktime_add_ns(q->failsafe_next_packet,
1454 static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
1456 struct cake_sched_data *q = qdisc_priv(sch);
1457 ktime_t now = ktime_get();
1458 u32 idx = 0, tin = 0, len;
1459 struct cake_heap_entry qq;
1460 struct cake_tin_data *b;
1461 struct cake_flow *flow;
1462 struct sk_buff *skb;
1464 if (!q->overflow_timeout) {
1466 /* Build fresh max-heap */
1467 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
1470 q->overflow_timeout = 65535;
1472 /* select longest queue for pruning */
1473 qq = q->overflow_heap[0];
1478 flow = &b->flows[idx];
1479 skb = dequeue_head(flow);
1480 if (unlikely(!skb)) {
1481 /* heap has gone wrong, rebuild it next time */
1482 q->overflow_timeout = 0;
1483 return idx + (tin << 16);
1486 if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
1487 b->unresponsive_flow_count++;
1489 len = qdisc_pkt_len(skb);
1490 q->buffer_used -= skb->truesize;
1491 b->backlogs[idx] -= len;
1492 b->tin_backlog -= len;
1493 sch->qstats.backlog -= len;
1494 qdisc_tree_reduce_backlog(sch, 1, len);
1498 sch->qstats.drops++;
1500 if (q->rate_flags & CAKE_FLAG_INGRESS)
1501 cake_advance_shaper(q, b, skb, now, true);
1503 __qdisc_drop(skb, to_free);
1508 return idx + (tin << 16);
1511 static void cake_wash_diffserv(struct sk_buff *skb)
1513 switch (skb->protocol) {
1514 case htons(ETH_P_IP):
1515 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
1517 case htons(ETH_P_IPV6):
1518 ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
1525 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
1529 switch (skb->protocol) {
1530 case htons(ETH_P_IP):
1531 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
1533 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
1536 case htons(ETH_P_IPV6):
1537 dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
1539 ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
1542 case htons(ETH_P_ARP):
1543 return 0x38; /* CS7 - Net Control */
1546 /* If there is no Diffserv field, treat as best-effort */
1551 static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
1552 struct sk_buff *skb)
1554 struct cake_sched_data *q = qdisc_priv(sch);
1557 if (TC_H_MAJ(skb->priority) == sch->handle &&
1558 TC_H_MIN(skb->priority) > 0 &&
1559 TC_H_MIN(skb->priority) <= q->tin_cnt) {
1560 tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
1562 if (q->rate_flags & CAKE_FLAG_WASH)
1563 cake_wash_diffserv(skb);
1564 } else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
1565 /* extract the Diffserv Precedence field, if it exists */
1566 /* and clear DSCP bits if washing */
1567 tin = q->tin_index[cake_handle_diffserv(skb,
1568 q->rate_flags & CAKE_FLAG_WASH)];
1569 if (unlikely(tin >= q->tin_cnt))
1573 if (q->rate_flags & CAKE_FLAG_WASH)
1574 cake_wash_diffserv(skb);
1577 return &q->tins[tin];
1580 static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
1581 struct sk_buff *skb, int flow_mode, int *qerr)
1583 struct cake_sched_data *q = qdisc_priv(sch);
1584 struct tcf_proto *filter;
1585 struct tcf_result res;
1586 u16 flow = 0, host = 0;
1589 filter = rcu_dereference_bh(q->filter_list);
1593 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1594 result = tcf_classify(skb, filter, &res, false);
1597 #ifdef CONFIG_NET_CLS_ACT
1602 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
1608 if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
1609 flow = TC_H_MIN(res.classid);
1610 if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
1611 host = TC_H_MAJ(res.classid) >> 16;
1614 *t = cake_select_tin(sch, skb);
1615 return cake_hash(*t, skb, flow_mode, flow, host) + 1;
1618 static void cake_reconfigure(struct Qdisc *sch);
1620 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1621 struct sk_buff **to_free)
1623 struct cake_sched_data *q = qdisc_priv(sch);
1624 int len = qdisc_pkt_len(skb);
1625 int uninitialized_var(ret);
1626 struct sk_buff *ack = NULL;
1627 ktime_t now = ktime_get();
1628 struct cake_tin_data *b;
1629 struct cake_flow *flow;
1632 /* choose flow to insert into */
1633 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
1635 if (ret & __NET_XMIT_BYPASS)
1636 qdisc_qstats_drop(sch);
1637 __qdisc_drop(skb, to_free);
1641 flow = &b->flows[idx];
1643 /* ensure shaper state isn't stale */
1644 if (!b->tin_backlog) {
1645 if (ktime_before(b->time_next_packet, now))
1646 b->time_next_packet = now;
1649 if (ktime_before(q->time_next_packet, now)) {
1650 q->failsafe_next_packet = now;
1651 q->time_next_packet = now;
1652 } else if (ktime_after(q->time_next_packet, now) &&
1653 ktime_after(q->failsafe_next_packet, now)) {
1655 min(ktime_to_ns(q->time_next_packet),
1657 q->failsafe_next_packet));
1658 sch->qstats.overlimits++;
1659 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1664 if (unlikely(len > b->max_skblen))
1665 b->max_skblen = len;
1667 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
1668 struct sk_buff *segs, *nskb;
1669 netdev_features_t features = netif_skb_features(skb);
1670 unsigned int slen = 0, numsegs = 0;
1672 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
1673 if (IS_ERR_OR_NULL(segs))
1674 return qdisc_drop(skb, sch, to_free);
1678 skb_mark_not_on_list(segs);
1679 qdisc_skb_cb(segs)->pkt_len = segs->len;
1680 cobalt_set_enqueue_time(segs, now);
1681 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
1683 flow_queue_add(flow, segs);
1688 q->buffer_used += segs->truesize;
1695 b->backlogs[idx] += slen;
1696 b->tin_backlog += slen;
1697 sch->qstats.backlog += slen;
1698 q->avg_window_bytes += slen;
1700 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
1704 cobalt_set_enqueue_time(skb, now);
1705 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
1706 flow_queue_add(flow, skb);
1709 ack = cake_ack_filter(q, flow);
1713 sch->qstats.drops++;
1714 b->bytes += qdisc_pkt_len(ack);
1715 len -= qdisc_pkt_len(ack);
1716 q->buffer_used += skb->truesize - ack->truesize;
1717 if (q->rate_flags & CAKE_FLAG_INGRESS)
1718 cake_advance_shaper(q, b, ack, now, true);
1720 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
1724 q->buffer_used += skb->truesize;
1730 b->backlogs[idx] += len;
1731 b->tin_backlog += len;
1732 sch->qstats.backlog += len;
1733 q->avg_window_bytes += len;
1736 if (q->overflow_timeout)
1737 cake_heapify_up(q, b->overflow_idx[idx]);
1739 /* incoming bandwidth capacity estimate */
1740 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
1741 u64 packet_interval = \
1742 ktime_to_ns(ktime_sub(now, q->last_packet_time));
1744 if (packet_interval > NSEC_PER_SEC)
1745 packet_interval = NSEC_PER_SEC;
1747 /* filter out short-term bursts, eg. wifi aggregation */
1748 q->avg_packet_interval = \
1749 cake_ewma(q->avg_packet_interval,
1751 (packet_interval > q->avg_packet_interval ?
1754 q->last_packet_time = now;
1756 if (packet_interval > q->avg_packet_interval) {
1757 u64 window_interval = \
1758 ktime_to_ns(ktime_sub(now,
1759 q->avg_window_begin));
1760 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
1762 do_div(b, window_interval);
1763 q->avg_peak_bandwidth =
1764 cake_ewma(q->avg_peak_bandwidth, b,
1765 b > q->avg_peak_bandwidth ? 2 : 8);
1766 q->avg_window_bytes = 0;
1767 q->avg_window_begin = now;
1769 if (ktime_after(now,
1770 ktime_add_ms(q->last_reconfig_time,
1772 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
1773 cake_reconfigure(sch);
1777 q->avg_window_bytes = 0;
1778 q->last_packet_time = now;
1782 if (!flow->set || flow->set == CAKE_SET_DECAYING) {
1783 struct cake_host *srchost = &b->hosts[flow->srchost];
1784 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1788 list_add_tail(&flow->flowchain, &b->new_flows);
1790 b->decaying_flow_count--;
1791 list_move_tail(&flow->flowchain, &b->new_flows);
1793 flow->set = CAKE_SET_SPARSE;
1794 b->sparse_flow_count++;
1796 if (cake_dsrc(q->flow_mode))
1797 host_load = max(host_load, srchost->srchost_refcnt);
1799 if (cake_ddst(q->flow_mode))
1800 host_load = max(host_load, dsthost->dsthost_refcnt);
1802 flow->deficit = (b->flow_quantum *
1803 quantum_div[host_load]) >> 16;
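/* quantum_div[] (filled at init, not shown in this excerpt) holds ~2^16 / n,
 * so a flow whose host already carries host_load active flows starts with
 * roughly flow_quantum / host_load bytes of deficit, e.g. 1514 / 4 ~= 378
 * bytes at host_load = 4.
 */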
1804 } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
1805 /* this flow was empty, accounted as a sparse flow, but actually
1806 * in the bulk rotation.
1808 flow->set = CAKE_SET_BULK;
1809 b->sparse_flow_count--;
1810 b->bulk_flow_count++;
1813 if (q->buffer_used > q->buffer_max_used)
1814 q->buffer_max_used = q->buffer_used;
1816 if (q->buffer_used > q->buffer_limit) {
1819 while (q->buffer_used > q->buffer_limit) {
1821 cake_drop(sch, to_free);
1823 b->drop_overlimit += dropped;
1825 return NET_XMIT_SUCCESS;
1828 static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
1830 struct cake_sched_data *q = qdisc_priv(sch);
1831 struct cake_tin_data *b = &q->tins[q->cur_tin];
1832 struct cake_flow *flow = &b->flows[q->cur_flow];
1833 struct sk_buff *skb = NULL;
1837 skb = dequeue_head(flow);
1838 len = qdisc_pkt_len(skb);
1839 b->backlogs[q->cur_flow] -= len;
1840 b->tin_backlog -= len;
1841 sch->qstats.backlog -= len;
1842 q->buffer_used -= skb->truesize;
1845 if (q->overflow_timeout)
1846 cake_heapify(q, b->overflow_idx[q->cur_flow]);
1851 /* Discard leftover packets from a tin no longer in use. */
1852 static void cake_clear_tin(struct Qdisc *sch, u16 tin)
1854 struct cake_sched_data *q = qdisc_priv(sch);
1855 struct sk_buff *skb;
1858 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
1859 while (!!(skb = cake_dequeue_one(sch)))
1863 static struct sk_buff *cake_dequeue(struct Qdisc *sch)
1865 struct cake_sched_data *q = qdisc_priv(sch);
1866 struct cake_tin_data *b = &q->tins[q->cur_tin];
1867 struct cake_host *srchost, *dsthost;
1868 ktime_t now = ktime_get();
1869 struct cake_flow *flow;
1870 struct list_head *head;
1871 bool first_flow = true;
1872 struct sk_buff *skb;
1881 /* global hard shaper */
1882 if (ktime_after(q->time_next_packet, now) &&
1883 ktime_after(q->failsafe_next_packet, now)) {
1884 u64 next = min(ktime_to_ns(q->time_next_packet),
1885 ktime_to_ns(q->failsafe_next_packet));
1887 sch->qstats.overlimits++;
1888 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1892 /* Choose a class to work on. */
1894 /* In unlimited mode, can't rely on shaper timings, just balance
1897 bool wrapped = false, empty = true;
1899 while (b->tin_deficit < 0 ||
1900 !(b->sparse_flow_count + b->bulk_flow_count)) {
1901 if (b->tin_deficit <= 0)
1902 b->tin_deficit += b->tin_quantum_band;
1903 if (b->sparse_flow_count + b->bulk_flow_count)
1908 if (q->cur_tin >= q->tin_cnt) {
1913 /* It's possible for q->qlen to be
1914 * nonzero when we actually have no
1925 /* In shaped mode, choose:
1926 * - Highest-priority tin with queue and meeting schedule, or
1927 * - The earliest-scheduled tin with queue.
1929 ktime_t best_time = KTIME_MAX;
1930 int tin, best_tin = 0;
1932 for (tin = 0; tin < q->tin_cnt; tin++) {
1934 if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
1935 ktime_t time_to_pkt = \
1936 ktime_sub(b->time_next_packet, now);
1938 if (ktime_to_ns(time_to_pkt) <= 0 ||
1939 ktime_compare(time_to_pkt,
1941 best_time = time_to_pkt;
1947 q->cur_tin = best_tin;
1948 b = q->tins + best_tin;
1950 /* No point in going further if no packets to deliver. */
1951 if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
1956 /* service this class */
1957 head = &b->decaying_flows;
1958 if (!first_flow || list_empty(head)) {
1959 head = &b->new_flows;
1960 if (list_empty(head)) {
1961 head = &b->old_flows;
1962 if (unlikely(list_empty(head))) {
1963 head = &b->decaying_flows;
1964 if (unlikely(list_empty(head)))
1969 flow = list_first_entry(head, struct cake_flow, flowchain);
1970 q->cur_flow = flow - b->flows;
1973 /* triple isolation (modified DRR++) */
1974 srchost = &b->hosts[flow->srchost];
1975 dsthost = &b->hosts[flow->dsthost];
1978 if (cake_dsrc(q->flow_mode))
1979 host_load = max(host_load, srchost->srchost_refcnt);
1981 if (cake_ddst(q->flow_mode))
1982 host_load = max(host_load, dsthost->dsthost_refcnt);
1984 WARN_ON(host_load > CAKE_QUEUES);
1986 /* flow isolation (DRR++) */
1987 if (flow->deficit <= 0) {
1988 /* The shifted prandom_u32() is a way to apply dithering to
1989 * avoid accumulating roundoff errors
1991 flow->deficit += (b->flow_quantum * quantum_div[host_load] +
1992 (prandom_u32() >> 16)) >> 16;
1993 list_move_tail(&flow->flowchain, &b->old_flows);
1995 /* Keep all flows with deficits out of the sparse and decaying
1996 * rotations. No non-empty flow can go into the decaying
1997 * rotation, so they can't get deficits
1999 if (flow->set == CAKE_SET_SPARSE) {
2001 b->sparse_flow_count--;
2002 b->bulk_flow_count++;
2003 flow->set = CAKE_SET_BULK;
2005 /* we've moved it to the bulk rotation for
2006 * correct deficit accounting but we still want
2007 * to count it as a sparse flow, not a bulk one.
2009 flow->set = CAKE_SET_SPARSE_WAIT;
2015 /* Retrieve a packet via the AQM */
2017 skb = cake_dequeue_one(sch);
2019 /* this queue was actually empty */
2020 if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
2021 b->unresponsive_flow_count--;
2023 if (flow->cvars.p_drop || flow->cvars.count ||
2024 ktime_before(now, flow->cvars.drop_next)) {
2025 /* keep in the flowchain until the state has
2028 list_move_tail(&flow->flowchain,
2029 &b->decaying_flows);
2030 if (flow->set == CAKE_SET_BULK) {
2031 b->bulk_flow_count--;
2032 b->decaying_flow_count++;
2033 } else if (flow->set == CAKE_SET_SPARSE ||
2034 flow->set == CAKE_SET_SPARSE_WAIT) {
2035 b->sparse_flow_count--;
2036 b->decaying_flow_count++;
2038 flow->set = CAKE_SET_DECAYING;
2040 /* remove empty queue from the flowchain */
2041 list_del_init(&flow->flowchain);
2042 if (flow->set == CAKE_SET_SPARSE ||
2043 flow->set == CAKE_SET_SPARSE_WAIT)
2044 b->sparse_flow_count--;
2045 else if (flow->set == CAKE_SET_BULK)
2046 b->bulk_flow_count--;
2048 b->decaying_flow_count--;
2050 flow->set = CAKE_SET_NONE;
2051 srchost->srchost_refcnt--;
2052 dsthost->dsthost_refcnt--;
2057 /* Last packet in queue may be marked, shouldn't be dropped */
2058 if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
2059 (b->bulk_flow_count *
2061 CAKE_FLAG_INGRESS))) ||
2065 /* drop this packet, get another one */
2066 if (q->rate_flags & CAKE_FLAG_INGRESS) {
2067 len = cake_advance_shaper(q, b, skb,
2069 flow->deficit -= len;
2070 b->tin_deficit -= len;
2074 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
2075 qdisc_qstats_drop(sch);
2077 if (q->rate_flags & CAKE_FLAG_INGRESS)
2081 b->tin_ecn_mark += !!flow->cvars.ecn_marked;
2082 qdisc_bstats_update(sch, skb);
2084 /* collect delay stats */
2085 delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
2086 b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
2087 b->peak_delay = cake_ewma(b->peak_delay, delay,
2088 delay > b->peak_delay ? 2 : 8);
2089 b->base_delay = cake_ewma(b->base_delay, delay,
2090 delay < b->base_delay ? 2 : 8);
2092 len = cake_advance_shaper(q, b, skb, now, false);
2093 flow->deficit -= len;
2094 b->tin_deficit -= len;
2096 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
2097 u64 next = min(ktime_to_ns(q->time_next_packet),
2098 ktime_to_ns(q->failsafe_next_packet));
2100 qdisc_watchdog_schedule_ns(&q->watchdog, next);
2101 } else if (!sch->q.qlen) {
2104 for (i = 0; i < q->tin_cnt; i++) {
2105 if (q->tins[i].decaying_flow_count) {
2108 q->tins[i].cparams.target);
2110 qdisc_watchdog_schedule_ns(&q->watchdog,
2117 if (q->overflow_timeout)
2118 q->overflow_timeout--;
2123 static void cake_reset(struct Qdisc *sch)
2127 for (c = 0; c < CAKE_MAX_TINS; c++)
2128 cake_clear_tin(sch, c);
2131 static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2132 [TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 },
2133 [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
2134 [TCA_CAKE_ATM] = { .type = NLA_U32 },
2135 [TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 },
2136 [TCA_CAKE_OVERHEAD] = { .type = NLA_S32 },
2137 [TCA_CAKE_RTT] = { .type = NLA_U32 },
2138 [TCA_CAKE_TARGET] = { .type = NLA_U32 },
2139 [TCA_CAKE_AUTORATE] = { .type = NLA_U32 },
2140 [TCA_CAKE_MEMORY] = { .type = NLA_U32 },
2141 [TCA_CAKE_NAT] = { .type = NLA_U32 },
2142 [TCA_CAKE_RAW] = { .type = NLA_U32 },
2143 [TCA_CAKE_WASH] = { .type = NLA_U32 },
2144 [TCA_CAKE_MPU] = { .type = NLA_U32 },
2145 [TCA_CAKE_INGRESS] = { .type = NLA_U32 },
2146 [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
2149 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
2150 u64 target_ns, u64 rtt_est_ns)
2152 /* convert byte-rate into time-per-byte
2153 * so it will always unwedge in reasonable time.
2155 static const u64 MIN_RATE = 64;
2156 u32 byte_target = mtu;
2161 b->flow_quantum = 1514;
2163 b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
2165 rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
2166 rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
2167 while (!!(rate_ns >> 34)) {
2171 } /* else unlimited, ie. zero delay */
2173 b->tin_rate_bps = rate;
2174 b->tin_rate_ns = rate_ns;
2175 b->tin_rate_shft = rate_shft;
2177 byte_target_ns = (byte_target * rate_ns) >> rate_shft;
2179 b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
2180 b->cparams.interval = max(rtt_est_ns +
2181 b->cparams.target - target_ns,
2182 b->cparams.target * 2);
2183 b->cparams.mtu_time = byte_target_ns;
2184 b->cparams.p_inc = 1 << 24; /* 1/256 */
2185 b->cparams.p_dec = 1 << 20; /* 1/4096 */
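/* p_drop is a 0.32 fixed-point probability, so p_inc raises it by 1/256 per
 * (target-spaced) overflow event and p_dec lowers it by 1/4096 when the
 * queue is found empty; an unresponsive flow therefore ramps to certain drop
 * after roughly 256 overflow-triggered increments.
 */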
2188 static int cake_config_besteffort(struct Qdisc *sch)
2190 struct cake_sched_data *q = qdisc_priv(sch);
2191 struct cake_tin_data *b = &q->tins[0];
2192 u32 mtu = psched_mtu(qdisc_dev(sch));
2193 u64 rate = q->rate_bps;
2197 q->tin_index = besteffort;
2198 q->tin_order = normal_order;
2200 cake_set_rate(b, rate, mtu,
2201 us_to_ns(q->target), us_to_ns(q->interval));
2202 b->tin_quantum_band = 65535;
2203 b->tin_quantum_prio = 65535;
2208 static int cake_config_precedence(struct Qdisc *sch)
2210 /* convert high-level (user visible) parameters into internal format */
2211 struct cake_sched_data *q = qdisc_priv(sch);
2212 u32 mtu = psched_mtu(qdisc_dev(sch));
2213 u64 rate = q->rate_bps;
2219 q->tin_index = precedence;
2220 q->tin_order = normal_order;
2222 for (i = 0; i < q->tin_cnt; i++) {
2223 struct cake_tin_data *b = &q->tins[i];
2225 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2226 us_to_ns(q->interval));
2228 b->tin_quantum_prio = max_t(u16, 1U, quantum1);
2229 b->tin_quantum_band = max_t(u16, 1U, quantum2);
2231 /* calculate next class's parameters */
2245 /* List of known Diffserv codepoints:
2247 * Least Effort (CS1)
2249 * Max Reliability & LLT "Lo" (TOS1)
2250 * Max Throughput (TOS2)
2253 * Assured Forwarding 1 (AF1x) - x3
2254 * Assured Forwarding 2 (AF2x) - x3
2255 * Assured Forwarding 3 (AF3x) - x3
2256 * Assured Forwarding 4 (AF4x) - x3
2257 * Precedence Class 2 (CS2)
2258 * Precedence Class 3 (CS3)
2259 * Precedence Class 4 (CS4)
2260 * Precedence Class 5 (CS5)
2261 * Precedence Class 6 (CS6)
2262 * Precedence Class 7 (CS7)
2264 * Expedited Forwarding (EF)
2266 * Total 25 codepoints.
2269 /* List of traffic classes in RFC 4594:
2270 * (roughly descending order of contended priority)
2271 * (roughly ascending order of uncontended throughput)
2273 * Network Control (CS6,CS7) - routing traffic
2274 * Telephony (EF,VA) - aka. VoIP streams
2275 * Signalling (CS5) - VoIP setup
2276 * Multimedia Conferencing (AF4x) - aka. video calls
2277 * Realtime Interactive (CS4) - eg. games
2278 * Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch
2279 * Broadcast Video (CS3)
2280 * Low Latency Data (AF2x,TOS4) - eg. database
2281 * Ops, Admin, Management (CS2,TOS1) - eg. ssh
2282 * Standard Service (CS0 & unrecognised codepoints)
2283 * High Throughput Data (AF1x,TOS2) - eg. web traffic
2284 * Low Priority Data (CS1) - eg. BitTorrent
2286 * Total 12 traffic classes.
2289 static int cake_config_diffserv8(struct Qdisc *sch)
2291 /* Pruned list of traffic classes for typical applications:
2293 * Network Control (CS6, CS7)
2294 * Minimum Latency (EF, VA, CS5, CS4)
2295 * Interactive Shell (CS2, TOS1)
2296 * Low Latency Transactions (AF2x, TOS4)
2297 * Video Streaming (AF4x, AF3x, CS3)
2298 * Bog Standard (CS0 etc.)
2299 * High Throughput (AF1x, TOS2)
2300 * Background Traffic (CS1)
2302 * Total 8 traffic classes.
2305 struct cake_sched_data *q = qdisc_priv(sch);
2306 u32 mtu = psched_mtu(qdisc_dev(sch));
2307 u64 rate = q->rate_bps;
2314 /* codepoint to class mapping */
2315 q->tin_index = diffserv8;
2316 q->tin_order = normal_order;
2318 /* class characteristics */
2319 for (i = 0; i < q->tin_cnt; i++) {
2320 struct cake_tin_data *b = &q->tins[i];
2322 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2323 us_to_ns(q->interval));
2325 b->tin_quantum_prio = max_t(u16, 1U, quantum1);
2326 b->tin_quantum_band = max_t(u16, 1U, quantum2);
2328 /* calculate next class's parameters */
2342 static int cake_config_diffserv4(struct Qdisc *sch)
2344 /* Further pruned list of traffic classes for four-class system:
2346 * Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
2347 * Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
2348 * Best Effort (CS0, AF1x, TOS2, and those not specified)
2349 * Background Traffic (CS1)
2351 * Total 4 traffic classes.
2354 struct cake_sched_data *q = qdisc_priv(sch);
2355 u32 mtu = psched_mtu(qdisc_dev(sch));
2356 u64 rate = q->rate_bps;
2361 /* codepoint to class mapping */
2362 q->tin_index = diffserv4;
2363 q->tin_order = bulk_order;
2365 /* class characteristics */
2366 cake_set_rate(&q->tins[0], rate, mtu,
2367 us_to_ns(q->target), us_to_ns(q->interval));
2368 cake_set_rate(&q->tins[1], rate >> 4, mtu,
2369 us_to_ns(q->target), us_to_ns(q->interval));
2370 cake_set_rate(&q->tins[2], rate >> 1, mtu,
2371 us_to_ns(q->target), us_to_ns(q->interval));
2372 cake_set_rate(&q->tins[3], rate >> 2, mtu,
2373 us_to_ns(q->target), us_to_ns(q->interval));
2375 /* priority weights */
2376 q->tins[0].tin_quantum_prio = quantum;
2377 q->tins[1].tin_quantum_prio = quantum >> 4;
2378 q->tins[2].tin_quantum_prio = quantum << 2;
2379 q->tins[3].tin_quantum_prio = quantum << 4;
2381 /* bandwidth-sharing weights */
2382 q->tins[0].tin_quantum_band = quantum;
2383 q->tins[1].tin_quantum_band = quantum >> 4;
2384 q->tins[2].tin_quantum_band = quantum >> 1;
2385 q->tins[3].tin_quantum_band = quantum >> 2;
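/* e.g. with a 100 Mbit/s shaper the Bulk tin's threshold is ~6.25 Mbit/s,
 * Video ~50 Mbit/s and Voice ~25 Mbit/s; below its threshold a tin competes
 * with the high priority weight, above it only the smaller bandwidth-sharing
 * weight applies (see the header comment at the top of this file).
 */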
2390 static int cake_config_diffserv3(struct Qdisc *sch)
2392 /* Simplified Diffserv structure with 3 tins.
2393 * Low Priority (CS1)
2395 * Latency Sensitive (TOS4, VA, EF, CS6, CS7)
2397 struct cake_sched_data *q = qdisc_priv(sch);
2398 u32 mtu = psched_mtu(qdisc_dev(sch));
2399 u64 rate = q->rate_bps;
2404 /* codepoint to class mapping */
2405 q->tin_index = diffserv3;
2406 q->tin_order = bulk_order;
2408 /* class characteristics */
2409 cake_set_rate(&q->tins[0], rate, mtu,
2410 us_to_ns(q->target), us_to_ns(q->interval));
2411 cake_set_rate(&q->tins[1], rate >> 4, mtu,
2412 us_to_ns(q->target), us_to_ns(q->interval));
2413 cake_set_rate(&q->tins[2], rate >> 2, mtu,
2414 us_to_ns(q->target), us_to_ns(q->interval));
2416 /* priority weights */
2417 q->tins[0].tin_quantum_prio = quantum;
2418 q->tins[1].tin_quantum_prio = quantum >> 4;
2419 q->tins[2].tin_quantum_prio = quantum << 4;
2421 /* bandwidth-sharing weights */
2422 q->tins[0].tin_quantum_band = quantum;
2423 q->tins[1].tin_quantum_band = quantum >> 4;
2424 q->tins[2].tin_quantum_band = quantum >> 2;
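/* Same idea as diffserv4 minus the Video tin: Best Effort at the full rate,
 * Bulk at 1/16 and the latency-sensitive tin at 1/4 of it.
 */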
2429 static void cake_reconfigure(struct Qdisc *sch)
2431 struct cake_sched_data *q = qdisc_priv(sch);
2434 switch (q->tin_mode) {
2435 case CAKE_DIFFSERV_BESTEFFORT:
2436 ft = cake_config_besteffort(sch);
2439 case CAKE_DIFFSERV_PRECEDENCE:
2440 ft = cake_config_precedence(sch);
2443 case CAKE_DIFFSERV_DIFFSERV8:
2444 ft = cake_config_diffserv8(sch);
2447 case CAKE_DIFFSERV_DIFFSERV4:
2448 ft = cake_config_diffserv4(sch);
2451 case CAKE_DIFFSERV_DIFFSERV3:
2453 ft = cake_config_diffserv3(sch);
2457 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
2458 cake_clear_tin(sch, c);
2459 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
2462 q->rate_ns = q->tins[ft].tin_rate_ns;
2463 q->rate_shft = q->tins[ft].tin_rate_shft;
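/* Size the packet buffer: honour an explicit memory limit if one was set,
 * otherwise allow roughly four intervals' worth of data at the shaped rate
 * (but never less than 4 MiB), or an effectively unlimited buffer when
 * unshaped. The result is then capped against the qdisc limit below.
 */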
2465 if (q->buffer_config_limit) {
2466 q->buffer_limit = q->buffer_config_limit;
2467 } else if (q->rate_bps) {
2468 u64 t = q->rate_bps * q->interval;
2470 do_div(t, USEC_PER_SEC / 4);
2471 q->buffer_limit = max_t(u32, t, 4U << 20);
2473 q->buffer_limit = ~0;
2476 sch->flags &= ~TCQ_F_CAN_BYPASS;
2478 q->buffer_limit = min(q->buffer_limit,
2479 max(sch->limit * psched_mtu(qdisc_dev(sch)),
2480 q->buffer_config_limit));
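/* cake_change() parses the TCA_CAKE_* attributes and re-derives the per-tin
 * parameters via cake_reconfigure() under the qdisc tree lock.
 *
 * Illustrative example (standard tc(8) front end, not part of this file):
 *
 *	tc qdisc replace dev eth0 root cake bandwidth 50Mbit diffserv4 nat
 *
 * which would populate TCA_CAKE_BASE_RATE64, TCA_CAKE_DIFFSERV_MODE and
 * TCA_CAKE_NAT respectively.
 */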
2483 static int cake_change(struct Qdisc *sch, struct nlattr *opt,
2484 struct netlink_ext_ack *extack)
2486 struct cake_sched_data *q = qdisc_priv(sch);
2487 struct nlattr *tb[TCA_CAKE_MAX + 1];
2493 err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
2497 if (tb[TCA_CAKE_NAT]) {
2498 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
2499 q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
2500 q->flow_mode |= CAKE_FLOW_NAT_FLAG *
2501 !!nla_get_u32(tb[TCA_CAKE_NAT]);
2503 NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
2504 "No conntrack support in kernel");
2509 if (tb[TCA_CAKE_BASE_RATE64])
2510 q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
2512 if (tb[TCA_CAKE_DIFFSERV_MODE])
2513 q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
2515 if (tb[TCA_CAKE_WASH]) {
2516 if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
2517 q->rate_flags |= CAKE_FLAG_WASH;
2519 q->rate_flags &= ~CAKE_FLAG_WASH;
2522 if (tb[TCA_CAKE_FLOW_MODE])
2523 q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
2524 (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
2527 if (tb[TCA_CAKE_ATM])
2528 q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
2530 if (tb[TCA_CAKE_OVERHEAD]) {
2531 q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
2532 q->rate_flags |= CAKE_FLAG_OVERHEAD;
2540 if (tb[TCA_CAKE_RAW]) {
2541 q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
2549 if (tb[TCA_CAKE_MPU])
2550 q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
2552 if (tb[TCA_CAKE_RTT]) {
2553 q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
2559 if (tb[TCA_CAKE_TARGET]) {
2560 q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
2566 if (tb[TCA_CAKE_AUTORATE]) {
2567 if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
2568 q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
2570 q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
2573 if (tb[TCA_CAKE_INGRESS]) {
2574 if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
2575 q->rate_flags |= CAKE_FLAG_INGRESS;
2577 q->rate_flags &= ~CAKE_FLAG_INGRESS;
2580 if (tb[TCA_CAKE_ACK_FILTER])
2581 q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
2583 if (tb[TCA_CAKE_MEMORY])
2584 q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
2586 if (tb[TCA_CAKE_SPLIT_GSO]) {
2587 if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
2588 q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
2590 q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
2595 cake_reconfigure(sch);
2596 sch_tree_unlock(sch);
2602 static void cake_destroy(struct Qdisc *sch)
2604 struct cake_sched_data *q = qdisc_priv(sch);
2606 qdisc_watchdog_cancel(&q->watchdog);
2607 tcf_block_put(q->block);
2611 static int cake_init(struct Qdisc *sch, struct nlattr *opt,
2612 struct netlink_ext_ack *extack)
2614 struct cake_sched_data *q = qdisc_priv(sch);
2618 q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
2619 q->flow_mode = CAKE_FLOW_TRIPLE;
2621 q->rate_bps = 0; /* unlimited by default */
2623 q->interval = 100000; /* 100ms default */
2624 q->target = 5000; /* 5ms: codel RFC argues
2625 * for 5 to 10% of interval
2627 q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
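/* GSO splitting defaults to on; the TCA_CAKE_SPLIT_GSO attribute handled in
 * cake_change() can turn it back off.
 */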
2631 qdisc_watchdog_init(&q->watchdog, sch);
2634 int err = cake_change(sch, opt, extack);
2640 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
2644 quantum_div[0] = ~0;
2645 for (i = 1; i <= CAKE_QUEUES; i++)
2646 quantum_div[i] = 65535 / i;
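/* quantum_div[] is a rough 16-bit reciprocal table (~65535 / i). Sketch of
 * its use (illustrative, not a quote of the dequeue path):
 *
 *	u32 scaled = (flow_quantum * quantum_div[host_load]) >> 16;
 *	// approximately flow_quantum / host_load, with no division
 *
 * which is how host fairness scales a flow's quantum by the number of
 * flows sharing that host.
 */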
2648 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
2653 for (i = 0; i < CAKE_MAX_TINS; i++) {
2654 struct cake_tin_data *b = q->tins + i;
2656 INIT_LIST_HEAD(&b->new_flows);
2657 INIT_LIST_HEAD(&b->old_flows);
2658 INIT_LIST_HEAD(&b->decaying_flows);
2659 b->sparse_flow_count = 0;
2660 b->bulk_flow_count = 0;
2661 b->decaying_flow_count = 0;
2663 for (j = 0; j < CAKE_QUEUES; j++) {
2664 struct cake_flow *flow = b->flows + j;
2665 u32 k = j * CAKE_MAX_TINS + i;
2667 INIT_LIST_HEAD(&flow->flowchain);
2668 cobalt_vars_init(&flow->cvars);
2670 q->overflow_heap[k].t = i;
2671 q->overflow_heap[k].b = j;
2672 b->overflow_idx[j] = k;
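/* each heap entry remembers its (tin, flow) pair and each flow its heap
 * slot, so the largest-backlog queue can be found and re-sifted cheaply
 * when the buffer overflows
 */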
2676 cake_reconfigure(sch);
2677 q->avg_peak_bandwidth = q->rate_bps;
2687 static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
2689 struct cake_sched_data *q = qdisc_priv(sch);
2690 struct nlattr *opts;
2692 opts = nla_nest_start(skb, TCA_OPTIONS);
2694 goto nla_put_failure;
2696 if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
2698 goto nla_put_failure;
2700 if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
2701 q->flow_mode & CAKE_FLOW_MASK))
2702 goto nla_put_failure;
2704 if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
2705 goto nla_put_failure;
2707 if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
2708 goto nla_put_failure;
2710 if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
2711 goto nla_put_failure;
2713 if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
2714 !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
2715 goto nla_put_failure;
2717 if (nla_put_u32(skb, TCA_CAKE_INGRESS,
2718 !!(q->rate_flags & CAKE_FLAG_INGRESS)))
2719 goto nla_put_failure;
2721 if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
2722 goto nla_put_failure;
2724 if (nla_put_u32(skb, TCA_CAKE_NAT,
2725 !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
2726 goto nla_put_failure;
2728 if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
2729 goto nla_put_failure;
2731 if (nla_put_u32(skb, TCA_CAKE_WASH,
2732 !!(q->rate_flags & CAKE_FLAG_WASH)))
2733 goto nla_put_failure;
2735 if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
2736 goto nla_put_failure;
2738 if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
2739 if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
2740 goto nla_put_failure;
2742 if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
2743 goto nla_put_failure;
2745 if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
2746 goto nla_put_failure;
2748 if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
2749 !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
2750 goto nla_put_failure;
2752 return nla_nest_end(skb, opts);
2758 static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
2760 struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
2761 struct cake_sched_data *q = qdisc_priv(sch);
2762 struct nlattr *tstats, *ts;
2768 #define PUT_STAT_U32(attr, data) do { \
2769 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2770 goto nla_put_failure; \
2772 #define PUT_STAT_U64(attr, data) do { \
2773 if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
2774 data, TCA_CAKE_STATS_PAD)) \
2775 goto nla_put_failure; \
2778 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
2779 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
2780 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
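/* avg_netoff is kept in 16.16 fixed point; round to nearest on export */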
2781 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
2782 PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
2783 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
2784 PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
2785 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
2790 tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
2792 goto nla_put_failure;
2794 #define PUT_TSTAT_U32(attr, data) do { \
2795 if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
2796 goto nla_put_failure; \
2798 #define PUT_TSTAT_U64(attr, data) do { \
2799 if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
2800 data, TCA_CAKE_TIN_STATS_PAD)) \
2801 goto nla_put_failure; \
2804 for (i = 0; i < q->tin_cnt; i++) {
2805 struct cake_tin_data *b = &q->tins[q->tin_order[i]];
2807 ts = nla_nest_start(d->skb, i + 1);
2809 goto nla_put_failure;
2811 PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
2812 PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
2813 PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);
2815 PUT_TSTAT_U32(TARGET_US,
2816 ktime_to_us(ns_to_ktime(b->cparams.target)));
2817 PUT_TSTAT_U32(INTERVAL_US,
2818 ktime_to_us(ns_to_ktime(b->cparams.interval)));
2820 PUT_TSTAT_U32(SENT_PACKETS, b->packets);
2821 PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
2822 PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
2823 PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
2825 PUT_TSTAT_U32(PEAK_DELAY_US,
2826 ktime_to_us(ns_to_ktime(b->peak_delay)));
2827 PUT_TSTAT_U32(AVG_DELAY_US,
2828 ktime_to_us(ns_to_ktime(b->avge_delay)));
2829 PUT_TSTAT_U32(BASE_DELAY_US,
2830 ktime_to_us(ns_to_ktime(b->base_delay)));
2832 PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
2833 PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
2834 PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);
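/* decaying flows are still reported as sparse */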
2836 PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
2837 b->decaying_flow_count);
2838 PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
2839 PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
2840 PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
2842 PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
2843 nla_nest_end(d->skb, ts);
2846 #undef PUT_TSTAT_U32
2847 #undef PUT_TSTAT_U64
2849 nla_nest_end(d->skb, tstats);
2850 return nla_nest_end(d->skb, stats);
2853 nla_nest_cancel(d->skb, stats);
2857 static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
2862 static unsigned long cake_find(struct Qdisc *sch, u32 classid)
2867 static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
2873 static void cake_unbind(struct Qdisc *q, unsigned long cl)
2877 static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
2878 struct netlink_ext_ack *extack)
2880 struct cake_sched_data *q = qdisc_priv(sch);
2887 static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
2888 struct sk_buff *skb, struct tcmsg *tcm)
2890 tcm->tcm_handle |= TC_H_MIN(cl);
2894 static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
2895 struct gnet_dump *d)
2897 struct cake_sched_data *q = qdisc_priv(sch);
2898 const struct cake_flow *flow = NULL;
2899 struct gnet_stats_queue qs = { 0 };
2900 struct nlattr *stats;
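/* each class corresponds to one (tin, flow) pair: the tin (in display
 * order) comes from idx / CAKE_QUEUES, the flow from idx % CAKE_QUEUES
 */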
2903 if (idx < CAKE_QUEUES * q->tin_cnt) {
2904 const struct cake_tin_data *b = \
2905 &q->tins[q->tin_order[idx / CAKE_QUEUES]];
2906 const struct sk_buff *skb;
2908 flow = &b->flows[idx % CAKE_QUEUES];
2917 sch_tree_unlock(sch);
2919 qs.backlog = b->backlogs[idx % CAKE_QUEUES];
2920 qs.drops = flow->dropped;
2922 if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
2925 ktime_t now = ktime_get();
2927 stats = nla_nest_start(d->skb, TCA_STATS_APP);
2931 #define PUT_STAT_U32(attr, data) do { \
2932 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2933 goto nla_put_failure; \
2935 #define PUT_STAT_S32(attr, data) do { \
2936 if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2937 goto nla_put_failure; \
2940 PUT_STAT_S32(DEFICIT, flow->deficit);
2941 PUT_STAT_U32(DROPPING, flow->cvars.dropping);
2942 PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
2943 PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
2944 if (flow->cvars.p_drop) {
2945 PUT_STAT_S32(BLUE_TIMER_US,
2948 flow->cvars.blue_timer)));
2950 if (flow->cvars.dropping) {
2951 PUT_STAT_S32(DROP_NEXT_US,
2954 flow->cvars.drop_next)));
2957 if (nla_nest_end(d->skb, stats) < 0)
2964 nla_nest_cancel(d->skb, stats);
2968 static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2970 struct cake_sched_data *q = qdisc_priv(sch);
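/* visit every non-empty flow queue, numbering classes as
 * tin * CAKE_QUEUES + flow + 1 to match the per-class stat lookups
 */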
2976 for (i = 0; i < q->tin_cnt; i++) {
2977 struct cake_tin_data *b = &q->tins[q->tin_order[i]];
2979 for (j = 0; j < CAKE_QUEUES; j++) {
2980 if (list_empty(&b->flows[j].flowchain) ||
2981 arg->count < arg->skip) {
2985 if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
2994 static const struct Qdisc_class_ops cake_class_ops = {
2997 .tcf_block = cake_tcf_block,
2998 .bind_tcf = cake_bind,
2999 .unbind_tcf = cake_unbind,
3000 .dump = cake_dump_class,
3001 .dump_stats = cake_dump_class_stats,
3005 static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
3006 .cl_ops = &cake_class_ops,
3008 .priv_size = sizeof(struct cake_sched_data),
3009 .enqueue = cake_enqueue,
3010 .dequeue = cake_dequeue,
3011 .peek = qdisc_peek_dequeued,
3013 .reset = cake_reset,
3014 .destroy = cake_destroy,
3015 .change = cake_change,
3017 .dump_stats = cake_dump_stats,
3018 .owner = THIS_MODULE,
3021 static int __init cake_module_init(void)
3023 return register_qdisc(&cake_qdisc_ops);
3026 static void __exit cake_module_exit(void)
3028 unregister_qdisc(&cake_qdisc_ops);
3031 module_init(cake_module_init)
3032 module_exit(cake_module_exit)
3033 MODULE_AUTHOR("Jonathan Morton");
3034 MODULE_LICENSE("Dual BSD/GPL");
3035 MODULE_DESCRIPTION("The CAKE shaper.");