| author | Scott Wood <scottwood@freescale.com> | 2014-04-08 01:00:49 (GMT) |
|---|---|---|
| committer | Scott Wood <scottwood@freescale.com> | 2014-04-08 19:58:35 (GMT) |
| commit | 47d2261a3fa71cde24263559a4219a25e50d8c89 (patch) | |
| tree | 28774d5b330ccf1b777a3af222d8356918328013 /net/sched | |
| parent | fb7f27080adc65cd5f341bdf56a1d0c14f316c1b (diff) | |
| parent | 5fb9d37f27351e42f002e372074249f92cbdf815 (diff) | |
| download | linux-fsl-qoriq-47d2261a3fa71cde24263559a4219a25e50d8c89.tar.xz | |
Merge branch 'merge' into sdk-v1.6.x
This reverts v3.13-rc3+ (78fd82238d0e5716) to v3.12, except for
commits I noticed that appear relevant to the SDK.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Conflicts:
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_interrupts.S
arch/powerpc/kvm/e500.c
arch/powerpc/kvm/e500mc.c
arch/powerpc/sysdev/fsl_soc.h
drivers/Kconfig
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/dma/fsldma.c
drivers/dma/s3c24xx-dma.c
drivers/misc/Makefile
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mtd/devices/m25p80.c
drivers/net/ethernet/freescale/gianfar.h
drivers/platform/Kconfig
drivers/platform/Makefile
drivers/spi/spi-fsl-espi.c
include/crypto/algapi.h
include/linux/netdev_features.h
include/linux/skbuff.h
include/net/ip.h
net/core/ethtool.c
Diffstat (limited to 'net/sched')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | net/sched/Kconfig | 10 |
| -rw-r--r-- | net/sched/Makefile | 1 |
| -rw-r--r-- | net/sched/act_police.c | 4 |
| -rw-r--r-- | net/sched/cls_basic.c | 2 |
| -rw-r--r-- | net/sched/cls_bpf.c | 385 |
| -rw-r--r-- | net/sched/cls_cgroup.c | 4 |
| -rw-r--r-- | net/sched/em_ipset.c | 7 |
| -rw-r--r-- | net/sched/em_meta.c | 4 |
| -rw-r--r-- | net/sched/sch_api.c | 3 |
| -rw-r--r-- | net/sched/sch_fq.c | 41 |
| -rw-r--r-- | net/sched/sch_generic.c | 13 |
| -rw-r--r-- | net/sched/sch_htb.c | 17 |
| -rw-r--r-- | net/sched/sch_netem.c | 8 |
| -rw-r--r-- | net/sched/sch_tbf.c | 54 |
14 files changed, 49 insertions, 504 deletions
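The largest single entry, cls_bpf.c (385 lines), is the removal of the BPF-based classifier whose full text appears in the diff below. For orientation only, here is a hypothetical, self-contained user-space sketch (not part of this commit) of the kind of classic BPF program such a classifier runs per packet; in this example it is attached through the long-standing SO_ATTACH_FILTER socket option rather than through tc:

```c
/* Hypothetical standalone example, not part of this commit: a classic BPF
 * program of the kind the removed cls_bpf classifier executes per packet,
 * here attached to a packet socket with SO_ATTACH_FILTER.
 * Requires CAP_NET_RAW to run. */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/filter.h>

#ifndef SO_ATTACH_FILTER
#define SO_ATTACH_FILTER 26      /* value from asm-generic/socket.h */
#endif

int main(void)
{
	/* Accept IPv4 frames, drop everything else. */
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),           /* load EtherType */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1), /* IPv4?          */
		BPF_STMT(BPF_RET | BPF_K, 0xffff),                   /* yes: accept    */
		BPF_STMT(BPF_RET | BPF_K, 0),                        /* no: drop       */
	};
	struct sock_fprog prog = {
		.len    = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) {
		perror("setsockopt(SO_ATTACH_FILTER)");
		return 1;
	}
	printf("classic BPF filter attached to fd %d\n", fd);
	return 0;
}
```

Each sock_filter entry uses the same classic BPF instruction format that the removed cls_bpf_classify() runs through SK_RUN_FILTER().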
diff --git a/net/sched/Kconfig b/net/sched/Kconfig index ad1f1d8..c03a32a 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -443,16 +443,6 @@ config NET_CLS_CGROUP To compile this code as a module, choose M here: the module will be called cls_cgroup. -config NET_CLS_BPF - tristate "BPF-based classifier" - select NET_CLS - ---help--- - If you say Y here, you will be able to classify packets based on - programmable BPF (JIT'ed) filters as an alternative to ematches. - - To compile this code as a module, choose M here: the module will - be called cls_bpf. - config NET_EMATCH bool "Extended Matches" select NET_CLS diff --git a/net/sched/Makefile b/net/sched/Makefile index 35fa47a..e5f9abe 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -50,7 +50,6 @@ obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o -obj-$(CONFIG_NET_CLS_BPF) += cls_bpf.o obj-$(CONFIG_NET_EMATCH) += ematch.o obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 272d8e9..189e3c5 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -231,14 +231,14 @@ override: } if (R_tab) { police->rate_present = true; - psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0); + psched_ratecfg_precompute(&police->rate, &R_tab->rate); qdisc_put_rtab(R_tab); } else { police->rate_present = false; } if (P_tab) { police->peak_present = true; - psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0); + psched_ratecfg_precompute(&police->peak, &P_tab->rate); qdisc_put_rtab(P_tab); } else { police->peak_present = false; diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 636d913..d76a35d 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -137,7 +137,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *est) { - int err; + int err = -EINVAL; struct tcf_exts e; struct tcf_ematch_tree t; diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c deleted file mode 100644 index 1002a82..0000000 --- a/net/sched/cls_bpf.c +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Berkeley Packet Filter based traffic classifier - * - * Might be used to classify traffic through flexible, user-defined and - * possibly JIT-ed BPF filters for traffic control as an alternative to - * ematches. - * - * (C) 2013 Daniel Borkmann <dborkman@redhat.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/skbuff.h> -#include <linux/filter.h> -#include <net/rtnetlink.h> -#include <net/pkt_cls.h> -#include <net/sock.h> - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>"); -MODULE_DESCRIPTION("TC BPF based classifier"); - -struct cls_bpf_head { - struct list_head plist; - u32 hgen; -}; - -struct cls_bpf_prog { - struct sk_filter *filter; - struct sock_filter *bpf_ops; - struct tcf_exts exts; - struct tcf_result res; - struct list_head link; - u32 handle; - u16 bpf_len; -}; - -static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { - [TCA_BPF_CLASSID] = { .type = NLA_U32 }, - [TCA_BPF_OPS_LEN] = { .type = NLA_U16 }, - [TCA_BPF_OPS] = { .type = NLA_BINARY, - .len = sizeof(struct sock_filter) * BPF_MAXINSNS }, -}; - -static const struct tcf_ext_map bpf_ext_map = { - .action = TCA_BPF_ACT, - .police = TCA_BPF_POLICE, -}; - -static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, - struct tcf_result *res) -{ - struct cls_bpf_head *head = tp->root; - struct cls_bpf_prog *prog; - int ret; - - list_for_each_entry(prog, &head->plist, link) { - int filter_res = SK_RUN_FILTER(prog->filter, skb); - - if (filter_res == 0) - continue; - - *res = prog->res; - if (filter_res != -1) - res->classid = filter_res; - - ret = tcf_exts_exec(skb, &prog->exts, res); - if (ret < 0) - continue; - - return ret; - } - - return -1; -} - -static int cls_bpf_init(struct tcf_proto *tp) -{ - struct cls_bpf_head *head; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (head == NULL) - return -ENOBUFS; - - INIT_LIST_HEAD(&head->plist); - tp->root = head; - - return 0; -} - -static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog) -{ - tcf_unbind_filter(tp, &prog->res); - tcf_exts_destroy(tp, &prog->exts); - - sk_unattached_filter_destroy(prog->filter); - - kfree(prog->bpf_ops); - kfree(prog); -} - -static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) -{ - struct cls_bpf_head *head = tp->root; - struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg; - - list_for_each_entry(prog, &head->plist, link) { - if (prog == todel) { - tcf_tree_lock(tp); - list_del(&prog->link); - tcf_tree_unlock(tp); - - cls_bpf_delete_prog(tp, prog); - return 0; - } - } - - return -ENOENT; -} - -static void cls_bpf_destroy(struct tcf_proto *tp) -{ - struct cls_bpf_head *head = tp->root; - struct cls_bpf_prog *prog, *tmp; - - list_for_each_entry_safe(prog, tmp, &head->plist, link) { - list_del(&prog->link); - cls_bpf_delete_prog(tp, prog); - } - - kfree(head); -} - -static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) -{ - struct cls_bpf_head *head = tp->root; - struct cls_bpf_prog *prog; - unsigned long ret = 0UL; - - if (head == NULL) - return 0UL; - - list_for_each_entry(prog, &head->plist, link) { - if (prog->handle == handle) { - ret = (unsigned long) prog; - break; - } - } - - return ret; -} - -static void cls_bpf_put(struct tcf_proto *tp, unsigned long f) -{ -} - -static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, - struct cls_bpf_prog *prog, - unsigned long base, struct nlattr **tb, - struct nlattr *est) -{ - struct sock_filter *bpf_ops, *bpf_old; - struct tcf_exts exts; - struct sock_fprog tmp; - struct sk_filter *fp, *fp_old; - u16 bpf_size, bpf_len; - u32 classid; - int ret; - - if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID]) - return -EINVAL; - - ret = tcf_exts_validate(net, tp, tb, est, &exts, 
&bpf_ext_map); - if (ret < 0) - return ret; - - classid = nla_get_u32(tb[TCA_BPF_CLASSID]); - bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]); - if (bpf_len > BPF_MAXINSNS || bpf_len == 0) { - ret = -EINVAL; - goto errout; - } - - bpf_size = bpf_len * sizeof(*bpf_ops); - bpf_ops = kzalloc(bpf_size, GFP_KERNEL); - if (bpf_ops == NULL) { - ret = -ENOMEM; - goto errout; - } - - memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size); - - tmp.len = bpf_len; - tmp.filter = (struct sock_filter __user *) bpf_ops; - - ret = sk_unattached_filter_create(&fp, &tmp); - if (ret) - goto errout_free; - - tcf_tree_lock(tp); - fp_old = prog->filter; - bpf_old = prog->bpf_ops; - - prog->bpf_len = bpf_len; - prog->bpf_ops = bpf_ops; - prog->filter = fp; - prog->res.classid = classid; - tcf_tree_unlock(tp); - - tcf_bind_filter(tp, &prog->res, base); - tcf_exts_change(tp, &prog->exts, &exts); - - if (fp_old) - sk_unattached_filter_destroy(fp_old); - if (bpf_old) - kfree(bpf_old); - - return 0; - -errout_free: - kfree(bpf_ops); -errout: - tcf_exts_destroy(tp, &exts); - return ret; -} - -static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp, - struct cls_bpf_head *head) -{ - unsigned int i = 0x80000000; - - do { - if (++head->hgen == 0x7FFFFFFF) - head->hgen = 1; - } while (--i > 0 && cls_bpf_get(tp, head->hgen)); - if (i == 0) - pr_err("Insufficient number of handles\n"); - - return i; -} - -static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, - struct tcf_proto *tp, unsigned long base, - u32 handle, struct nlattr **tca, - unsigned long *arg) -{ - struct cls_bpf_head *head = tp->root; - struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg; - struct nlattr *tb[TCA_BPF_MAX + 1]; - int ret; - - if (tca[TCA_OPTIONS] == NULL) - return -EINVAL; - - ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy); - if (ret < 0) - return ret; - - if (prog != NULL) { - if (handle && prog->handle != handle) - return -EINVAL; - return cls_bpf_modify_existing(net, tp, prog, base, tb, - tca[TCA_RATE]); - } - - prog = kzalloc(sizeof(*prog), GFP_KERNEL); - if (prog == NULL) - return -ENOBUFS; - - if (handle == 0) - prog->handle = cls_bpf_grab_new_handle(tp, head); - else - prog->handle = handle; - if (prog->handle == 0) { - ret = -EINVAL; - goto errout; - } - - ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE]); - if (ret < 0) - goto errout; - - tcf_tree_lock(tp); - list_add(&prog->link, &head->plist); - tcf_tree_unlock(tp); - - *arg = (unsigned long) prog; - - return 0; -errout: - if (*arg == 0UL && prog) - kfree(prog); - - return ret; -} - -static int cls_bpf_dump(struct tcf_proto *tp, unsigned long fh, - struct sk_buff *skb, struct tcmsg *tm) -{ - struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh; - struct nlattr *nest, *nla; - - if (prog == NULL) - return skb->len; - - tm->tcm_handle = prog->handle; - - nest = nla_nest_start(skb, TCA_OPTIONS); - if (nest == NULL) - goto nla_put_failure; - - if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid)) - goto nla_put_failure; - if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len)) - goto nla_put_failure; - - nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len * - sizeof(struct sock_filter)); - if (nla == NULL) - goto nla_put_failure; - - memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla)); - - if (tcf_exts_dump(skb, &prog->exts, &bpf_ext_map) < 0) - goto nla_put_failure; - - nla_nest_end(skb, nest); - - if (tcf_exts_dump_stats(skb, &prog->exts, &bpf_ext_map) < 0) - goto nla_put_failure; - - return skb->len; - -nla_put_failure: - 
nla_nest_cancel(skb, nest); - return -1; -} - -static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg) -{ - struct cls_bpf_head *head = tp->root; - struct cls_bpf_prog *prog; - - list_for_each_entry(prog, &head->plist, link) { - if (arg->count < arg->skip) - goto skip; - if (arg->fn(tp, (unsigned long) prog, arg) < 0) { - arg->stop = 1; - break; - } -skip: - arg->count++; - } -} - -static struct tcf_proto_ops cls_bpf_ops __read_mostly = { - .kind = "bpf", - .owner = THIS_MODULE, - .classify = cls_bpf_classify, - .init = cls_bpf_init, - .destroy = cls_bpf_destroy, - .get = cls_bpf_get, - .put = cls_bpf_put, - .change = cls_bpf_change, - .delete = cls_bpf_delete, - .walk = cls_bpf_walk, - .dump = cls_bpf_dump, -}; - -static int __init cls_bpf_init_mod(void) -{ - return register_tcf_proto_ops(&cls_bpf_ops); -} - -static void __exit cls_bpf_exit_mod(void) -{ - unregister_tcf_proto_ops(&cls_bpf_ops); -} - -module_init(cls_bpf_init_mod); -module_exit(cls_bpf_exit_mod); diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 16006c9..867b4a3 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -72,11 +72,11 @@ static void cgrp_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) { struct task_struct *p; - struct cgroup_cls_state *cs = css_cls_state(css); - void *v = (void *)(unsigned long)cs->classid; + void *v; cgroup_taskset_for_each(p, css, tset) { task_lock(p); + v = (void *)(unsigned long)task_cls_classid(p); iterate_fd(p->files, 0, update_classid, v); task_unlock(p); } diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c index 527aeb7..938b7cb 100644 --- a/net/sched/em_ipset.c +++ b/net/sched/em_ipset.c @@ -24,12 +24,11 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len, { struct xt_set_info *set = data; ip_set_id_t index; - struct net *net = dev_net(qdisc_dev(tp->q)); if (data_len != sizeof(*set)) return -EINVAL; - index = ip_set_nfnl_get_byindex(net, set->index); + index = ip_set_nfnl_get_byindex(set->index); if (index == IPSET_INVALID_ID) return -ENOENT; @@ -38,7 +37,7 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len, if (em->data) return 0; - ip_set_nfnl_put(net, index); + ip_set_nfnl_put(index); return -ENOMEM; } @@ -46,7 +45,7 @@ static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em) { const struct xt_set_info *set = (const void *) em->data; if (set) { - ip_set_nfnl_put(dev_net(qdisc_dev(p->q)), set->index); + ip_set_nfnl_put(set->index); kfree((void *) em->data); } } diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index e5cef956..7c3de6f 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -793,10 +793,8 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len, goto errout; meta = kzalloc(sizeof(*meta), GFP_KERNEL); - if (meta == NULL) { - err = -ENOMEM; + if (meta == NULL) goto errout; - } memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left)); memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right)); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index cd81505..2adda7f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -737,11 +737,9 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) const struct Qdisc_class_ops *cops; unsigned long cl; u32 parentid; - int drops; if (n == 0) return; - drops = max_t(int, n, 0); while ((parentid = sch->parent)) { if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) return; @@ -758,7 +756,6 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int 
n) cops->put(sch, cl); } sch->q.qlen -= n; - sch->qstats.drops += drops; } } EXPORT_SYMBOL(qdisc_tree_decrease_qlen); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 95d8439..a9dfdda 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -88,7 +88,7 @@ struct fq_sched_data { struct fq_flow internal; /* for non classified or high prio packets */ u32 quantum; u32 initial_quantum; - u32 flow_refill_delay; + u32 flow_default_rate;/* rate per flow : bytes per second */ u32 flow_max_rate; /* optional max rate per flow */ u32 flow_plimit; /* max packets per flow */ struct rb_root *fq_root; @@ -115,7 +115,6 @@ static struct fq_flow detached, throttled; static void fq_flow_set_detached(struct fq_flow *f) { f->next = &detached; - f->age = jiffies; } static bool fq_flow_is_detached(const struct fq_flow *f) @@ -210,15 +209,21 @@ static void fq_gc(struct fq_sched_data *q, } } +static const u8 prio2band[TC_PRIO_MAX + 1] = { + 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 +}; + static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) { struct rb_node **p, *parent; struct sock *sk = skb->sk; struct rb_root *root; struct fq_flow *f; + int band; /* warning: no starvation prevention... */ - if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL)) + band = prio2band[skb->priority & TC_PRIO_MAX]; + if (unlikely(band == 0)) return &q->internal; if (unlikely(!sk)) { @@ -250,7 +255,6 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q) f->socket_hash != sk->sk_hash)) { f->credit = q->initial_quantum; f->socket_hash = sk->sk_hash; - f->time_next_packet = 0ULL; } return f; } @@ -368,20 +372,17 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch) } f->qlen++; + flow_queue_add(f, skb); if (skb_is_retransmit(skb)) q->stat_tcp_retrans++; sch->qstats.backlog += qdisc_pkt_len(skb); if (fq_flow_is_detached(f)) { fq_flow_add_tail(&q->new_flows, f); - if (time_after(jiffies, f->age + q->flow_refill_delay)) - f->credit = max_t(u32, f->credit, q->quantum); + if (q->quantum > f->credit) + f->credit = q->quantum; q->inactive_flows--; qdisc_unthrottled(sch); } - - /* Note: this overwrites f->age */ - flow_queue_add(f, skb); - if (unlikely(f == &q->internal)) { q->stat_internal_packets++; qdisc_unthrottled(sch); @@ -459,6 +460,7 @@ begin: fq_flow_add_tail(&q->old_flows, f); } else { fq_flow_set_detached(f); + f->age = jiffies; q->inactive_flows++; } goto begin; @@ -612,7 +614,6 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = { [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 }, [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 }, [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 }, - [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 }, }; static int fq_change(struct Qdisc *sch, struct nlattr *opt) @@ -654,8 +655,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]); if (tb[TCA_FQ_FLOW_DEFAULT_RATE]) - pr_warn_ratelimited("sch_fq: defrate %u ignored.\n", - nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE])); + q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]); if (tb[TCA_FQ_FLOW_MAX_RATE]) q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]); @@ -669,12 +669,6 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt) err = -EINVAL; } - if (tb[TCA_FQ_FLOW_REFILL_DELAY]) { - u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ; - - q->flow_refill_delay = usecs_to_jiffies(usecs_delay); - } - if (!err) err = fq_resize(q, fq_log); @@ -710,7 +704,7 @@ static 
int fq_init(struct Qdisc *sch, struct nlattr *opt) q->flow_plimit = 100; q->quantum = 2 * psched_mtu(qdisc_dev(sch)); q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch)); - q->flow_refill_delay = msecs_to_jiffies(40); + q->flow_default_rate = 0; q->flow_max_rate = ~0U; q->rate_enable = 1; q->new_flows.first = NULL; @@ -737,16 +731,15 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb) if (opts == NULL) goto nla_put_failure; - /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */ - + /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore, + * do not bother giving its value + */ if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) || nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) || nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) || nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) || nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) || nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) || - nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY, - jiffies_to_usecs(q->flow_refill_delay)) || nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log)) goto nla_put_failure; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 819c7a3..ed13d54 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_stopped(txq)) - ret = dev_hard_start_xmit(skb, dev, txq, NULL); + ret = dev_hard_start_xmit(skb, dev, txq); HARD_TX_UNLOCK(dev, txq); @@ -844,7 +844,7 @@ void dev_deactivate_many(struct list_head *head) struct net_device *dev; bool sync_needed = false; - list_for_each_entry(dev, head, close_list) { + list_for_each_entry(dev, head, unreg_list) { netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc); if (dev_ingress_queue(dev)) @@ -863,7 +863,7 @@ void dev_deactivate_many(struct list_head *head) synchronize_net(); /* Wait for outstanding qdisc_run calls. 
*/ - list_for_each_entry(dev, head, close_list) + list_for_each_entry(dev, head, unreg_list) while (some_qdisc_is_busy(dev)) yield(); } @@ -872,7 +872,7 @@ void dev_deactivate(struct net_device *dev) { LIST_HEAD(single); - list_add(&dev->close_list, &single); + list_add(&dev->unreg_list, &single); dev_deactivate_many(&single); list_del(&single); } @@ -925,12 +925,11 @@ void dev_shutdown(struct net_device *dev) } void psched_ratecfg_precompute(struct psched_ratecfg *r, - const struct tc_ratespec *conf, - u64 rate64) + const struct tc_ratespec *conf) { memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; - r->rate_bytes_ps = max_t(u64, conf->rate, rate64); + r->rate_bytes_ps = conf->rate; r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); r->mult = 1; /* diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 0e1e38b..863846c 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -997,8 +997,6 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = { [TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 }, - [TCA_HTB_RATE64] = { .type = NLA_U64 }, - [TCA_HTB_CEIL64] = { .type = NLA_U64 }, }; static void htb_work_func(struct work_struct *work) @@ -1116,12 +1114,6 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg, opt.level = cl->level; if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt)) goto nla_put_failure; - if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && - nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps)) - goto nla_put_failure; - if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && - nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps)) - goto nla_put_failure; nla_nest_end(skb, nest); spin_unlock_bh(root_lock); @@ -1340,7 +1332,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, struct qdisc_rate_table *rtab = NULL, *ctab = NULL; struct nlattr *tb[TCA_HTB_MAX + 1]; struct tc_htb_opt *hopt; - u64 rate64, ceil64; /* extract all subattrs from opt attr */ if (!opt) @@ -1500,12 +1491,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, cl->prio = TC_HTB_NUMPRIO - 1; } - rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0; - - ceil64 = tb[TCA_HTB_CEIL64] ? 
nla_get_u64(tb[TCA_HTB_CEIL64]) : 0; - - psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); - psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); + psched_ratecfg_precompute(&cl->rate, &hopt->rate); + psched_ratecfg_precompute(&cl->ceil, &hopt->ceil); cl->buffer = PSCHED_TICKS2NS(hopt->buffer); cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index bccd52b..b87e83d 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -215,10 +215,10 @@ static bool loss_4state(struct netem_sched_data *q) if (rnd < clg->a4) { clg->state = 4; return true; - } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { + } else if (clg->a4 < rnd && rnd < clg->a1) { clg->state = 3; return true; - } else if (clg->a1 + clg->a4 < rnd) + } else if (clg->a1 < rnd) clg->state = 1; break; @@ -235,6 +235,7 @@ static bool loss_4state(struct netem_sched_data *q) clg->state = 2; else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { clg->state = 1; + return true; } else if (clg->a2 + clg->a3 < rnd) { clg->state = 3; return true; @@ -268,11 +269,10 @@ static bool loss_gilb_ell(struct netem_sched_data *q) clg->state = 2; if (net_random() < clg->a4) return true; - break; case 2: if (net_random() < clg->a2) clg->state = 1; - if (net_random() > clg->a3) + if (clg->a3 > net_random()) return true; } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index d166360..8994543 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -21,7 +21,6 @@ #include <net/netlink.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> -#include <net/tcp.h> /* Simple Token Bucket Filter. @@ -131,22 +130,6 @@ struct tbf_sched_data { }; -/* - * Return length of individual segments of a gso packet, - * including all headers (MAC, IP, TCP/UDP) - */ -static unsigned int skb_gso_seglen(const struct sk_buff *skb) -{ - unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); - const struct skb_shared_info *shinfo = skb_shinfo(skb); - - if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) - hdr_len += tcp_hdrlen(skb); - else - hdr_len += sizeof(struct udphdr); - return hdr_len + shinfo->gso_size; -} - /* GSO packet is too big, segment it so that tbf can transmit * each segment in time */ @@ -166,8 +149,12 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) while (segs) { nskb = segs->next; segs->next = NULL; - qdisc_skb_cb(segs)->pkt_len = segs->len; - ret = qdisc_enqueue(segs, q->qdisc); + if (likely(segs->len <= q->max_size)) { + qdisc_skb_cb(segs)->pkt_len = segs->len; + ret = qdisc_enqueue(segs, q->qdisc); + } else { + ret = qdisc_reshape_fail(skb, sch); + } if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) sch->qstats.drops++; @@ -189,7 +176,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) int ret; if (qdisc_pkt_len(skb) > q->max_size) { - if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size) + if (skb_is_gso(skb)) return tbf_segment(skb, sch); return qdisc_reshape_fail(skb, sch); } @@ -292,23 +279,20 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = { [TCA_TBF_PARMS] = { .len = sizeof(struct tc_tbf_qopt) }, [TCA_TBF_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, [TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, - [TCA_TBF_RATE64] = { .type = NLA_U64 }, - [TCA_TBF_PRATE64] = { .type = NLA_U64 }, }; static int tbf_change(struct Qdisc *sch, struct nlattr *opt) { int err; struct tbf_sched_data *q = qdisc_priv(sch); - struct nlattr *tb[TCA_TBF_MAX + 1]; + struct 
nlattr *tb[TCA_TBF_PTAB + 1]; struct tc_tbf_qopt *qopt; struct qdisc_rate_table *rtab = NULL; struct qdisc_rate_table *ptab = NULL; struct Qdisc *child = NULL; int max_size, n; - u64 rate64 = 0, prate64 = 0; - err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy); + err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy); if (err < 0) return err; @@ -345,11 +329,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) if (max_size < 0) goto done; - if (max_size < psched_mtu(qdisc_dev(sch))) - pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n", - max_size, qdisc_dev(sch)->name, - psched_mtu(qdisc_dev(sch))); - if (q->qdisc != &noop_qdisc) { err = fifo_set_limit(q->qdisc, qopt->limit); if (err) @@ -375,13 +354,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) q->tokens = q->buffer; q->ptokens = q->mtu; - if (tb[TCA_TBF_RATE64]) - rate64 = nla_get_u64(tb[TCA_TBF_RATE64]); - psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64); + psched_ratecfg_precompute(&q->rate, &rtab->rate); if (ptab) { - if (tb[TCA_TBF_PRATE64]) - prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]); - psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64); + psched_ratecfg_precompute(&q->peak, &ptab->rate); q->peak_present = true; } else { q->peak_present = false; @@ -448,13 +423,6 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) opt.buffer = PSCHED_NS2TICKS(q->buffer); if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt)) goto nla_put_failure; - if (q->rate.rate_bytes_ps >= (1ULL << 32) && - nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps)) - goto nla_put_failure; - if (q->peak_present && - q->peak.rate_bytes_ps >= (1ULL << 32) && - nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps)) - goto nla_put_failure; nla_nest_end(skb, nest); return skb->len; |
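Several hunks above (act_police.c, sch_generic.c, sch_htb.c, sch_tbf.c) drop the 64-bit rate64 argument of psched_ratecfg_precompute() and, in sch_htb.c and sch_tbf.c, the TCA_HTB_RATE64/TCA_HTB_CEIL64 and TCA_TBF_RATE64/TCA_TBF_PRATE64 attributes, restoring the two-argument v3.12 form. Those attributes existed because struct tc_ratespec carries the rate as a 32-bit byte count, which caps configurable rates near 34 Gbit/s. The user-space mock below (hypothetical struct and function names, not kernel code) contrasts the two calling conventions:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct tc_ratespec / struct psched_ratecfg;
 * only the fields relevant to the rate ceiling are modelled. */
struct ratespec_mock {
	uint32_t rate;              /* bytes per second, 32-bit as in tc_ratespec */
};

struct ratecfg_mock {
	uint64_t rate_bytes_ps;     /* precomputed 64-bit rate */
};

/* Two-argument form this merge restores: the rate cannot exceed UINT32_MAX. */
static void precompute_v3_12(struct ratecfg_mock *r,
			     const struct ratespec_mock *conf)
{
	r->rate_bytes_ps = conf->rate;
}

/* Three-argument form being reverted: a 64-bit override (carried by the
 * TCA_*_RATE64 netlink attributes) can lift the 32-bit ceiling. */
static void precompute_rate64(struct ratecfg_mock *r,
			      const struct ratespec_mock *conf,
			      uint64_t rate64)
{
	r->rate_bytes_ps = conf->rate > rate64 ? conf->rate : rate64;
}

int main(void)
{
	struct ratespec_mock conf = { .rate = UINT32_MAX };   /* ~34 Gbit/s cap */
	struct ratecfg_mock r;

	precompute_v3_12(&r, &conf);
	printf("v3.12 form  : %llu bytes/s\n", (unsigned long long)r.rate_bytes_ps);

	/* Ask for 50 Gbit/s (6.25e9 bytes/s); only the rate64 form can express it. */
	precompute_rate64(&r, &conf, 6250000000ULL);
	printf("rate64 form : %llu bytes/s\n", (unsigned long long)r.rate_bytes_ps);
	return 0;
}
```

The 32-bit ceiling is also why the removed dump paths only emitted the 64-bit attributes when rate_bytes_ps reached 1ULL << 32.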