author    Scott Wood <scottwood@freescale.com>  2014-04-08 20:27:17 (GMT)
committer Scott Wood <scottwood@freescale.com>  2014-04-08 20:27:25 (GMT)
commit    63f2cd32fb5f8541636f47cc4c7c53cf8284851f
tree      d8e66fde55f70332fd495907d529996e9568038b
parent    47d2261a3fa71cde24263559a4219a25e50d8c89
parent    2acf1c25b979aaae983bfd7c0c4dd04b31ccfc01
Merge remote-tracking branch 'stable/linux-3.12.y' into sdk-v1.6.x
Signed-off-by: Scott Wood <scottwood@freescale.com>

Conflicts:
	drivers/mmc/card/block.c
Diffstat (limited to 'net/sched')
 -rw-r--r--  net/sched/sch_fq.c  | 40
 -rw-r--r--  net/sched/sch_tbf.c | 25
 2 files changed, 41 insertions(+), 24 deletions(-)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a9dfdda..2e55f81 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -88,7 +88,7 @@ struct fq_sched_data {
struct fq_flow internal; /* for non classified or high prio packets */
u32 quantum;
u32 initial_quantum;
- u32 flow_default_rate;/* rate per flow : bytes per second */
+ u32 flow_refill_delay;
u32 flow_max_rate; /* optional max rate per flow */
u32 flow_plimit; /* max packets per flow */
struct rb_root *fq_root;
@@ -115,6 +115,7 @@ static struct fq_flow detached, throttled;
static void fq_flow_set_detached(struct fq_flow *f)
{
f->next = &detached;
+ f->age = jiffies;
}
static bool fq_flow_is_detached(const struct fq_flow *f)
@@ -209,21 +210,15 @@ static void fq_gc(struct fq_sched_data *q,
}
}
-static const u8 prio2band[TC_PRIO_MAX + 1] = {
- 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
-};
-
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
struct rb_node **p, *parent;
struct sock *sk = skb->sk;
struct rb_root *root;
struct fq_flow *f;
- int band;
/* warning: no starvation prevention... */
- band = prio2band[skb->priority & TC_PRIO_MAX];
- if (unlikely(band == 0))
+ if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
return &q->internal;
if (unlikely(!sk)) {
@@ -372,17 +367,20 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
f->qlen++;
- flow_queue_add(f, skb);
if (skb_is_retransmit(skb))
q->stat_tcp_retrans++;
sch->qstats.backlog += qdisc_pkt_len(skb);
if (fq_flow_is_detached(f)) {
fq_flow_add_tail(&q->new_flows, f);
- if (q->quantum > f->credit)
- f->credit = q->quantum;
+ if (time_after(jiffies, f->age + q->flow_refill_delay))
+ f->credit = max_t(u32, f->credit, q->quantum);
q->inactive_flows--;
qdisc_unthrottled(sch);
}
+
+ /* Note: this overwrites f->age */
+ flow_queue_add(f, skb);
+
if (unlikely(f == &q->internal)) {
q->stat_internal_packets++;
qdisc_unthrottled(sch);
@@ -460,7 +458,6 @@ begin:
fq_flow_add_tail(&q->old_flows, f);
} else {
fq_flow_set_detached(f);
- f->age = jiffies;
q->inactive_flows++;
}
goto begin;
@@ -614,6 +611,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
[TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
@@ -655,7 +653,8 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
- q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
+ pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
+ nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
if (tb[TCA_FQ_FLOW_MAX_RATE])
q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
@@ -669,6 +668,12 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
err = -EINVAL;
}
+ if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
+ u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]) ;
+
+ q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
+ }
+
if (!err)
err = fq_resize(q, fq_log);
@@ -704,7 +709,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
q->flow_plimit = 100;
q->quantum = 2 * psched_mtu(qdisc_dev(sch));
q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
- q->flow_default_rate = 0;
+ q->flow_refill_delay = msecs_to_jiffies(40);
q->flow_max_rate = ~0U;
q->rate_enable = 1;
q->new_flows.first = NULL;
@@ -731,15 +736,16 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
if (opts == NULL)
goto nla_put_failure;
- /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
- * do not bother giving its value
- */
+ /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
+
if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
+ jiffies_to_usecs(q->flow_refill_delay)) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
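
The sch_fq.c hunks above retire the per-flow default rate (TCA_FQ_FLOW_DEFAULT_RATE is still accepted but only logs a warning), drop the prio2band lookup so that only TC_PRIO_CONTROL packets bypass fair queueing via the internal queue, and add a per-flow refill delay: a flow gets its credit topped back up to quantum only when it re-activates after being idle for longer than flow_refill_delay (40ms by default). A minimal sketch of that refill rule, using the field names visible in this diff (fq_flow ->age/->credit, fq_sched_data ->flow_refill_delay/->quantum); the helper below is an illustration, not code from the patch:

/* Illustration only: mirrors the refill rule added in fq_enqueue() above.
 * A flow idle for longer than flow_refill_delay gets its credit topped up
 * to quantum; a flow that detached only briefly keeps its remaining
 * (possibly negative) credit, so short pauses cannot be used to defeat
 * pacing.
 */
static void fq_refill_credit_sketch(struct fq_flow *f,
				    const struct fq_sched_data *q)
{
	if (time_after(jiffies, f->age + q->flow_refill_delay))
		f->credit = max_t(u32, f->credit, q->quantum);
}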
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 8994543..29e1c2a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -130,6 +130,16 @@ struct tbf_sched_data {
};
+/*
+ * Return length of individual segments of a gso packet,
+ * including all headers (MAC, IP, TCP/UDP)
+ */
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+{
+ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+ return hdr_len + skb_gso_transport_seglen(skb);
+}
+
/* GSO packet is too big, segment it so that tbf can transmit
* each segment in time
*/
@@ -149,12 +159,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
while (segs) {
nskb = segs->next;
segs->next = NULL;
- if (likely(segs->len <= q->max_size)) {
- qdisc_skb_cb(segs)->pkt_len = segs->len;
- ret = qdisc_enqueue(segs, q->qdisc);
- } else {
- ret = qdisc_reshape_fail(skb, sch);
- }
+ qdisc_skb_cb(segs)->pkt_len = segs->len;
+ ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
sch->qstats.drops++;
@@ -176,7 +182,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
int ret;
if (qdisc_pkt_len(skb) > q->max_size) {
- if (skb_is_gso(skb))
+ if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
return tbf_segment(skb, sch);
return qdisc_reshape_fail(skb, sch);
}
@@ -329,6 +335,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
if (max_size < 0)
goto done;
+ if (max_size < psched_mtu(qdisc_dev(sch)))
+ pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
+ max_size, qdisc_dev(sch)->name,
+ psched_mtu(qdisc_dev(sch)));
+
if (q->qdisc != &noop_qdisc) {
err = fifo_set_limit(q->qdisc, qopt->limit);
if (err)
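
For sch_tbf.c, the new skb_gso_mac_seglen() helper lets tbf_enqueue() segment an oversized GSO packet only when each resulting segment (MAC plus IP plus TCP/UDP headers plus one gso_size payload) would itself fit within max_size; when it would not, segmentation cannot help and the packet is still rejected, which is also why tbf_change() now warns when the configured burst is smaller than the device MTU. A sketch of that decision follows; the standalone helper and its name are mine, not part of the patch:

/* Sketch only: condenses the enqueue-path checks added above.
 * Returns true when an oversized packet should be GSO-segmented rather
 * than rejected, i.e. when every segment can individually pass the
 * token bucket.
 */
static bool tbf_should_segment_sketch(const struct sk_buff *skb, u32 max_size)
{
	return qdisc_pkt_len(skb) > max_size &&
	       skb_is_gso(skb) &&
	       skb_gso_mac_seglen(skb) <= max_size;
}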