// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 * Implemented on Linux by:
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>


#define DEFAULT_CODEL_LIMIT 1000

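/* Per-instance state: the user-tunable parameters, the control-law
 * variables that codel_dequeue() updates as packets are dequeued, and
 * the counters exported to user space by codel_dump_stats().
 */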
struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit;
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

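/* Called by codel_dequeue() whenever the control law decides to drop
 * a packet rather than ECN-mark it.
 */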
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	qdisc_dequeue_drop(sch, skb, QDISC_DROP_CONGESTED);
	qdisc_qstats_drop(sch);
}

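/* codel_dequeue() implements the CoDel control law: each packet's
 * sojourn time (stamped at enqueue) is compared against the target,
 * and once the queue has stayed above target for a full interval,
 * packets are dropped on a schedule that tightens as
 * interval / sqrt(count). Drops made inside codel_dequeue() are
 * accumulated in q->stats and must be propagated to ancestor qdiscs
 * with qdisc_tree_reduce_backlog().
 */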
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	if (q->stats.drop_count) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
					  q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

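/* Enqueue is deliberately cheap: stamp the packet with its arrival
 * time and append it to the tail; all AQM decisions happen at dequeue.
 * Packets arriving at a full queue are dropped and counted in
 * drop_overlimit.
 */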
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	WRITE_ONCE(q->drop_overlimit, q->drop_overlimit + 1);
	return qdisc_drop_reason(skb, sch, to_free, QDISC_DROP_OVERLIMIT);
}

static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

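/* target, interval and ce_threshold arrive from user space in
 * microseconds and are stored as codel_time_t, i.e. nanoseconds
 * shifted right by CODEL_SHIFT. The WRITE_ONCE() stores pair with the
 * READ_ONCE() loads in the dump paths. If the new limit is smaller
 * than the current queue length, the excess packets are dropped here
 * under the tree lock and the backlog change is reported to ancestor
 * qdiscs.
 */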
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = qdisc_dequeue_internal(sch, true);

		if (!skb)
			break;

		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);

	sch_tree_unlock(sch);
	return 0;
}

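/* Defaults on creation: a 1000-packet limit plus the parameters set by
 * codel_params_init() (5 ms target, 100 ms interval, ECN off,
 * ce_threshold disabled). Any attributes supplied at creation time are
 * then applied through codel_change().
 */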
static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	sch->flags |= TCQ_F_DEQUEUE_DROPS;

	return 0;
}

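/* Report the current configuration, converting times back to
 * microseconds; ce_threshold is only included once it has been set.
 */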
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

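/* Export the internal state. drop_next is reported relative to the
 * current time and is negative when the next scheduled drop is
 * already overdue.
 */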
static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= READ_ONCE(q->stats.maxpacket),
		.count		= READ_ONCE(q->vars.count),
		.lastcount	= READ_ONCE(q->vars.lastcount),
		.drop_overlimit = READ_ONCE(q->drop_overlimit),
		.ldelay		= codel_time_to_us(READ_ONCE(q->vars.ldelay)),
		.dropping	= READ_ONCE(q->vars.dropping),
		.ecn_mark	= READ_ONCE(q->stats.ecn_mark),
		.ce_mark	= READ_ONCE(q->stats.ce_mark),
	};

	if (st.dropping) {
		codel_tdiff_t delta = READ_ONCE(q->vars.drop_next) -
				      codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

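/* Flush all queued packets and restart the control law from idle;
 * the configured parameters survive a reset.
 */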
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		= "codel",
	.priv_size	= sizeof(struct codel_sched_data),

	.enqueue	= codel_qdisc_enqueue,
	.dequeue	= codel_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= codel_init,
	.reset		= codel_reset,
	.change		= codel_change,
	.dump		= codel_dump,
	.dump_stats	= codel_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");
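
/*
 * Example configuration from user space (illustrative; "eth0" is a
 * placeholder device name):
 *
 *	tc qdisc add dev eth0 root codel limit 1000 target 5ms \
 *		interval 100ms ecn
 *
 * Each option maps onto one of the TCA_CODEL_* attributes parsed by
 * codel_change() above.
 */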