author     Patrick McHardy <kaber@trash.net>            2007-03-23 11:28:07 -0700
committer  David S. Miller <davem@sunset.davemloft.net>  2007-04-25 22:27:50 -0700
commit     104e0878984bb467e3f54d61105d8903babb4ec1 (patch)
tree       796a0b5d189582255c3510817491428671921d5c /net
parent     7c59e25f3186f26e85b13a318dbc4482d1d363e9 (diff)
[NET_SCHED]: kill PSCHED_TLESS
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
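
The point of the change: PSCHED_TLESS was a comparison helper for psched_time_t, and now that psched_time_t is a plain scalar type the macro adds nothing over the < operator, so the call sites spell the comparison out directly. A minimal sketch of the rewrite, assuming the macro's historical definition in include/net/pkt_sched.h:

	/* assumed historical definition in include/net/pkt_sched.h */
	#define PSCHED_TLESS(tv1, tv2)	((tv1) < (tv2))

	/* mechanical rewrite at each call site */
	PSCHED_TLESS(a, b)	/* becomes */	a < b
	!PSCHED_TLESS(a, b)	/* becomes */	a >= b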
Diffstat (limited to 'net')
-rw-r--r--  net/sched/sch_cbq.c    7
-rw-r--r--  net/sched/sch_netem.c  6
2 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 290b26b..9e6cdab 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,7 +390,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
now = q->now + incr;
do {
- if (PSCHED_TLESS(cl->undertime, now)) {
+ if (cl->undertime < now) {
q->toplevel = cl->level;
return;
}
@@ -845,8 +845,7 @@ cbq_under_limit(struct cbq_class *cl)
if (cl->tparent == NULL)
return cl;
- if (PSCHED_IS_PASTPERFECT(cl->undertime) ||
- !PSCHED_TLESS(q->now, cl->undertime)) {
+ if (PSCHED_IS_PASTPERFECT(cl->undertime) || q->now >= cl->undertime) {
cl->delayed = 0;
return cl;
}
@@ -870,7 +869,7 @@ cbq_under_limit(struct cbq_class *cl)
if (cl->level > q->toplevel)
return NULL;
} while (!PSCHED_IS_PASTPERFECT(cl->undertime) &&
- PSCHED_TLESS(q->now, cl->undertime));
+ q->now < cl->undertime);
cl->delayed = 0;
return cl;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6044ae7..5d571aa 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -286,7 +286,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
/* if more time remaining? */
PSCHED_GET_TIME(now);
- if (!PSCHED_TLESS(now, cb->time_to_send)) {
+ if (cb->time_to_send <= now) {
pr_debug("netem_dequeue: return skb=%p\n", skb);
sch->q.qlen--;
return skb;
@@ -494,7 +494,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
if (likely(skb_queue_len(list) < q->limit)) {
/* Optimize for add at tail */
- if (likely(skb_queue_empty(list) || !PSCHED_TLESS(tnext, q->oldest))) {
+ if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
q->oldest = tnext;
return qdisc_enqueue_tail(nskb, sch);
}
@@ -503,7 +503,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
const struct netem_skb_cb *cb
= (const struct netem_skb_cb *)skb->cb;
- if (!PSCHED_TLESS(tnext, cb->time_to_send))
+ if (tnext >= cb->time_to_send)
break;
}
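
A quick check of the negated call sites in sch_netem.c, written out (operand names as in the hunks above):

	/* !PSCHED_TLESS(now, cb->time_to_send)  ==  !(now < cb->time_to_send)  ==  cb->time_to_send <= now */
	/* !PSCHED_TLESS(tnext, q->oldest)       ==  !(tnext < q->oldest)       ==  tnext >= q->oldest      */
	/* !PSCHED_TLESS(tnext, cb->time_to_send)                               ==  tnext >= cb->time_to_send */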