]> Git Repo - J-linux.git/blob - net/sched/sch_fifo.c
Merge tag 'vfs-6.13-rc7.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
[J-linux.git] / net / sched / sch_fifo.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/sch_fifo.c The simplest FIFO queue.
4  *
5  * Authors:     Alexey Kuznetsov, <[email protected]>
6  */
7
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/skbuff.h>
14 #include <net/pkt_sched.h>
15 #include <net/pkt_cls.h>
16
17 /* 1 band FIFO pseudo-"scheduler" */
18
19 static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
20                          struct sk_buff **to_free)
21 {
22         if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
23                    READ_ONCE(sch->limit)))
24                 return qdisc_enqueue_tail(skb, sch);
25
26         return qdisc_drop(skb, sch, to_free);
27 }
28
29 static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
30                          struct sk_buff **to_free)
31 {
32         if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
33                 return qdisc_enqueue_tail(skb, sch);
34
35         return qdisc_drop(skb, sch, to_free);
36 }
37
38 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
39                               struct sk_buff **to_free)
40 {
41         unsigned int prev_backlog;
42
43         if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
44                 return qdisc_enqueue_tail(skb, sch);
45
46         prev_backlog = sch->qstats.backlog;
47         /* queue full, remove one skb to fulfill the limit */
48         __qdisc_queue_drop_head(sch, &sch->q, to_free);
49         qdisc_qstats_drop(sch);
50         qdisc_enqueue_tail(skb, sch);
51
52         qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
53         return NET_XMIT_CN;
54 }
55
56 static void fifo_offload_init(struct Qdisc *sch)
57 {
58         struct net_device *dev = qdisc_dev(sch);
59         struct tc_fifo_qopt_offload qopt;
60
61         if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
62                 return;
63
64         qopt.command = TC_FIFO_REPLACE;
65         qopt.handle = sch->handle;
66         qopt.parent = sch->parent;
67         dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
68 }
69
70 static void fifo_offload_destroy(struct Qdisc *sch)
71 {
72         struct net_device *dev = qdisc_dev(sch);
73         struct tc_fifo_qopt_offload qopt;
74
75         if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
76                 return;
77
78         qopt.command = TC_FIFO_DESTROY;
79         qopt.handle = sch->handle;
80         qopt.parent = sch->parent;
81         dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
82 }
83
84 static int fifo_offload_dump(struct Qdisc *sch)
85 {
86         struct tc_fifo_qopt_offload qopt;
87
88         qopt.command = TC_FIFO_STATS;
89         qopt.handle = sch->handle;
90         qopt.parent = sch->parent;
91         qopt.stats.bstats = &sch->bstats;
92         qopt.stats.qstats = &sch->qstats;
93
94         return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
95 }
96
97 static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
98                        struct netlink_ext_ack *extack)
99 {
100         bool bypass;
101         bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
102
103         if (opt == NULL) {
104                 u32 limit = qdisc_dev(sch)->tx_queue_len;
105
106                 if (is_bfifo)
107                         limit *= psched_mtu(qdisc_dev(sch));
108
109                 WRITE_ONCE(sch->limit, limit);
110         } else {
111                 struct tc_fifo_qopt *ctl = nla_data(opt);
112
113                 if (nla_len(opt) < sizeof(*ctl))
114                         return -EINVAL;
115
116                 WRITE_ONCE(sch->limit, ctl->limit);
117         }
118
119         if (is_bfifo)
120                 bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
121         else
122                 bypass = sch->limit >= 1;
123
124         if (bypass)
125                 sch->flags |= TCQ_F_CAN_BYPASS;
126         else
127                 sch->flags &= ~TCQ_F_CAN_BYPASS;
128
129         return 0;
130 }
131
/* init/change hook for pfifo and bfifo: set up software state, then
 * (re)install the hardware offload when it succeeded.
 */
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err = __fifo_init(sch, opt, extack);

	if (!err)
		fifo_offload_init(sch);

	return err;
}
144
/* init/change hook for pfifo_head_drop: software-only setup, no
 * hardware offload variant exists for this flavour.
 */
static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        return __fifo_init(sch, opt, extack);
}
150
/* destroy hook for pfifo/bfifo: tear down the hardware offload; the
 * queued packets themselves are freed by the generic qdisc reset path.
 */
static void fifo_destroy(struct Qdisc *sch)
{
        fifo_offload_destroy(sch);
}
155
156 static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
157 {
158         struct tc_fifo_qopt opt = { .limit = READ_ONCE(sch->limit) };
159
160         if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
161                 goto nla_put_failure;
162         return skb->len;
163
164 nla_put_failure:
165         return -1;
166 }
167
/* dump hook for pfifo/bfifo: pull fresh stats from the offload first,
 * then emit the configuration.
 */
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err = fifo_offload_dump(sch);

	return err ? err : __fifo_dump(sch, skb);
}
178
/* dump hook for pfifo_head_drop: no offload, so only the software
 * configuration is emitted.
 */
static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        return __fifo_dump(sch, skb);
}
183
/* pfifo: packet-count limited FIFO, tail-drop on overflow. */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
        .id             =       "pfifo",
        .priv_size      =       0,      /* no private state beyond struct Qdisc */
        .enqueue        =       pfifo_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
        .init           =       fifo_init,
        .destroy        =       fifo_destroy,
        .reset          =       qdisc_reset_queue,
        .change         =       fifo_init,      /* change re-runs init */
        .dump           =       fifo_dump,
        .owner          =       THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
198
/* bfifo: byte-count limited FIFO, tail-drop on overflow. */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
        .id             =       "bfifo",
        .priv_size      =       0,      /* no private state beyond struct Qdisc */
        .enqueue        =       bfifo_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
        .init           =       fifo_init,
        .destroy        =       fifo_destroy,
        .reset          =       qdisc_reset_queue,
        .change         =       fifo_init,      /* change re-runs init */
        .dump           =       fifo_dump,
        .owner          =       THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
213
/* pfifo_head_drop: packet-count limited FIFO that drops the OLDEST
 * packet (head) to admit a new arrival when full.  Uses the hd_*
 * hooks and has no .destroy because this flavour has no offload.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
        .id             =       "pfifo_head_drop",
        .priv_size      =       0,      /* no private state beyond struct Qdisc */
        .enqueue        =       pfifo_tail_enqueue,
        .dequeue        =       qdisc_dequeue_head,
        .peek           =       qdisc_peek_head,
        .init           =       fifo_hd_init,
        .reset          =       qdisc_reset_queue,
        .change         =       fifo_hd_init,   /* change re-runs init */
        .dump           =       fifo_hd_dump,
        .owner          =       THIS_MODULE,
};
226
227 /* Pass size change message down to embedded FIFO */
228 int fifo_set_limit(struct Qdisc *q, unsigned int limit)
229 {
230         struct nlattr *nla;
231         int ret = -ENOMEM;
232
233         /* Hack to avoid sending change message to non-FIFO */
234         if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
235                 return 0;
236
237         if (!q->ops->change)
238                 return 0;
239
240         nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
241         if (nla) {
242                 nla->nla_type = RTM_NEWQDISC;
243                 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
244                 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
245
246                 ret = q->ops->change(q, nla, NULL);
247                 kfree(nla);
248         }
249         return ret;
250 }
251 EXPORT_SYMBOL(fifo_set_limit);
252
253 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
254                                unsigned int limit,
255                                struct netlink_ext_ack *extack)
256 {
257         struct Qdisc *q;
258         int err = -ENOMEM;
259
260         q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
261                               extack);
262         if (q) {
263                 err = fifo_set_limit(q, limit);
264                 if (err < 0) {
265                         qdisc_put(q);
266                         q = NULL;
267                 }
268         }
269
270         return q ? : ERR_PTR(err);
271 }
272 EXPORT_SYMBOL(fifo_create_dflt);
273 MODULE_DESCRIPTION("Single queue packet and byte based First In First Out(P/BFIFO) scheduler");
This page took 0.040671 seconds and 4 git commands to generate.