/* vim: ts=8 sw=8
 * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *                      HTB support at LARTC mailing list
 *              Ondrej Kraus, <krauso@barr.cz>
 *                      found missing INIT_QDISC(htb)
 *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *                      helped a lot to locate nasty class stall bug
 *              Andi Kleen, Jamal Hadi, Bert Hubert
 *                      code review and helpful comments on shaping
 *              Tomasz Wrona, <tw@eter.tym.pl>
 *                      created test case so that I was able to fix nasty bug
 *              Wilfried Weissmann
 *                      spotted bug in dequeue code and helped with fix
 *              Jiri Fojtasek
 *                      fixed requeue routine
 *              and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.1.1.1 2004-04-15 01:16:18 phoenix Exp $
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows a priority to be assigned to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/

#define HTB_HSIZE 16    /* classid hash size */
#define HTB_EWMAC 2     /* rate average over HTB_EWMAC*HTB_HSIZE sec */
#define HTB_DEBUG 1     /* compile debugging support (activated by tc tool) */
#define HTB_RATECM 1    /* whether to use rate computer */
#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
#define HTB_VER 0x30010 /* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* debugging support; S is subsystem, these are defined:

  1 - enqueue
  2 - drop & requeue
  3 - dequeue main
  4 - dequeue one prio DRR part
  5 - dequeue class accounting
  6 - class overlimit status computation
  7 - hint tree
  8 - event queue
 10 - rate estimator
 11 - classifier
 12 - fast dequeue cache

 L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
 the q->debug uint32 contains 16 2-bit fields, one per subsystem,
 starting from the LSB
 */
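/* An illustrative sketch (kept out of the build with #if 0) of how the
 * 2-bit debug fields described above are packed and tested; the mask
 * arithmetic mirrors HTB_DBG_COND below, and the helper name is made up.
 */
#if 0
static void htb_debug_bits_example(void)
{
        u32 debug = 0;
        /* enable level 2 ("detailed") for subsystem 3 (dequeue main):
           the field for subsystem S occupies bits 2*S and 2*S+1 */
        debug |= 2 << (2*3);            /* debug == 0x80 */
        /* the HTB_DBG_COND-style test: extract the field, compare levels */
        if (((debug >> (2*3)) & 3) >= 2)
                printk(KERN_DEBUG "subsystem 3 enabled at level 2\n");
}
#endif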
#ifdef HTB_DEBUG
#define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
#define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
        printk(KERN_DEBUG FMT,##ARG)
#define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
#define HTB_PASSQ q,
#define HTB_ARGQ struct htb_sched *q,
#define static
#undef __inline__
#define __inline__
#undef inline
#define inline
#define HTB_CMAGIC 0xFEFAFEF1
#define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
                if ((N)->rb_color == -1) break; \
                rb_erase(N,R); \
                (N)->rb_color = -1; } while (0)
#else
#define HTB_DBG_COND(S,L) (0)
#define HTB_DBG(S,L,FMT,ARG...)
#define HTB_PASSQ
#define HTB_ARGQ
#define HTB_CHCL(cl)
#define htb_safe_rb_erase(N,R) rb_erase(N,R)
#endif


/* used internally to keep status of single class */
enum htb_cmode {
    HTB_CANT_SEND,              /* class can't send and can't borrow */
    HTB_MAY_BORROW,             /* class can't send but may borrow */
    HTB_CAN_SEND                /* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class
{
#ifdef HTB_DEBUG
        unsigned magic;
#endif
    /* general class parameters */
    u32 classid;
    struct tc_stats     stats;  /* generic stats */
    struct tc_htb_xstats xstats;/* our special stats */
    int refcnt;                 /* usage count of this class */

#ifdef HTB_RATECM
    /* rate measurement counters */
    unsigned long rate_bytes,sum_bytes;
    unsigned long rate_packets,sum_packets;
#endif

    /* topology */
    int level;                  /* our level (see above) */
    struct htb_class *parent;   /* parent class */
    struct list_head hlist;     /* classid hash list item */
    struct list_head sibling;   /* sibling list item */
    struct list_head children;  /* children list */

    union {
            struct htb_class_leaf {
                    struct Qdisc *q;
                    int prio;
                    int aprio;
                    int quantum;
                    int deficit[TC_HTB_MAXDEPTH];
                    struct list_head drop_list;
            } leaf;
            struct htb_class_inner {
                    rb_root_t feed[TC_HTB_NUMPRIO];     /* feed trees */
                    rb_node_t *ptr[TC_HTB_NUMPRIO];     /* current class ptr */
            } inner;
    } un;
    rb_node_t node[TC_HTB_NUMPRIO];     /* node for self or feed tree */
    rb_node_t pq_node;                  /* node for event queue */
    unsigned long pq_key;       /* the same type as jiffies global */

    int prio_activity;          /* for which prios are we active */
    enum htb_cmode cmode;       /* current mode of the class */

    /* class attached filters */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int warned;         /* only one warning about non work conserving .. */

    /* token bucket parameters */
    struct qdisc_rate_table *rate;      /* rate table of the class itself */
    struct qdisc_rate_table *ceil;      /* ceiling rate (limits borrows too) */
    long buffer,cbuffer;                /* token bucket depth/rate */
    long mbuffer;                       /* max wait time */
    long tokens,ctokens;                /* current number of tokens */
    psched_time_t t_c;                  /* checkpoint time */
};

/* TODO: maybe compute rate when size is too large .. or drop ? */
static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
        int size)
{
    int slot = size >> rate->rate.cell_log;
    if (slot > 255) {
        cl->xstats.giants++;
        slot = 255;
    }
    return rate->data[slot];
}
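/* A worked sketch (kept out of the build with #if 0) of the table lookup
 * above; the cell_log and packet sizes are made-up example numbers, only
 * the shift/clamp arithmetic matches L2T.
 */
#if 0
static int l2t_slot_example(void)
{
        /* with cell_log == 3 the rate table has 8-byte cells: a 1500-byte
           packet selects slot 1500 >> 3 == 187, inside 0..255. A 4000-byte
           packet would give slot 500, which L2T clamps to 255 and counts
           in cl->xstats.giants. rate->data[slot] is the transmission time
           for that size in scheduler ticks. */
        return 1500 >> 3;       /* == 187 */
}
#endif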

struct htb_sched
{
    struct list_head root;                      /* root classes list */
    struct list_head hash[HTB_HSIZE];           /* hashed by classid */
    struct list_head drops[TC_HTB_NUMPRIO];     /* active leaves (for drops) */

    /* self list - roots of self generating tree */
    rb_root_t row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    int row_mask[TC_HTB_MAXDEPTH];
    rb_node_t *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

    /* self wait list - roots of wait PQs per row */
    rb_root_t wait_pq[TC_HTB_MAXDEPTH];

    /* time of nearest event per level (row) */
    unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

    /* cached value of jiffies in dequeue */
    unsigned long jiffies;

    /* whether we hit non-work conserving class during this dequeue; we use */
    int nwc_hit;        /* this to disable mindelay complaint in dequeue */

    int defcls;         /* class where unclassified flows go to */
    u32 debug;          /* subsystem debug levels */

    /* filters for qdisc itself */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int rate2quantum;           /* quant = rate / rate2quantum */
    psched_time_t now;          /* cached dequeue time */
    struct timer_list timer;    /* send delay timer */
#ifdef HTB_RATECM
    struct timer_list rttim;    /* rate computer timer */
    int recmp_bucket;           /* which hash bucket to recompute next */
#endif

    /* non shaped skbs; let them go directly thru */
    struct sk_buff_head direct_queue;
    int direct_qlen;  /* max qlen of above */

    long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static __inline__ int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
 #error "Declare new hash for your HTB_HSIZE"
#endif
    h ^= h>>8;  /* stolen from cbq_hash */
    h ^= h>>4;
    return h & 0xf;
}
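/* A worked example (kept out of the build with #if 0) of the folding hash
 * above; the classid is an arbitrary example value.
 */
#if 0
static int htb_hash_example(void)
{
        u32 h = 0x0001001a;     /* classid 1:1a */
        h ^= h >> 8;            /* 0x0001001a ^ 0x00000100 = 0x0001011a */
        h ^= h >> 4;            /* 0x0001011a ^ 0x00001011 = 0x0001110b */
        return h & 0xf;         /* bucket 0xb == 11 of the 16 buckets */
}
#endif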

/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct list_head *p;
        if (TC_H_MAJ(handle) != sch->handle)
                return NULL;

        list_for_each (p,q->hash+htb_hash(handle)) {
                struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                if (cl->classid == handle)
                        return cl;
        }
        return NULL;
}

/**
 * htb_classify - classify a packet into a class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly through. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in skb->priority.
 * Then we examine filters in the qdisc and in inner nodes (if a higher
 * filter points to the inner node). If we end up with classid MAJOR:0 we
 * enqueue the skb into the special internal fifo (direct). These packets
 * then go directly through. If we still have no valid leaf we try to use
 * the MAJOR:default leaf. If that is still unsuccessful we finish and
 * return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow class selection by setting skb->priority to a valid classid;
           note that nfmark can be used too, by attaching an fw filter with
           no rules in it */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;  /* X:0 (direct flow) selected */
        if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
                return cl;

        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_POLICE
                if (result == TC_POLICE_SHOT)
                        return NULL;
#endif
                if ((cl = (void*)res.class) == NULL) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;  /* X:0 (direct flow) */
                        if ((cl = htb_find(res.classid,sch)) == NULL)
                                break; /* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl; /* we hit leaf; return it */

                /* we have got inner class; apply inner filter chain */
                tcf = cl->filter_list;
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
        if (!cl || cl->level)
                return HTB_DIRECT; /* bad default; this is a safe bet */
        return cl;
}
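/* An illustrative sketch (kept out of the build with #if 0) of the three
 * classification outcomes, assuming a qdisc with handle 1:0 (0x00010000)
 * and an existing leaf class 1:5; both handles are made-up examples, not
 * anything defined in this file.
 */
#if 0
static void classify_hint_example(struct sk_buff *skb)
{
        skb->priority = 0x00010000; /* "1:0" -> HTB_DIRECT, skips shaping */
        skb->priority = 0x00010005; /* "1:5" -> that leaf, filters skipped */
        skb->priority = 0;          /* no hint -> filters, then MAJOR:defcls */
}
#endif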

#ifdef HTB_DEBUG
static void htb_next_rb_node(rb_node_t **n);
#define HTB_DUMTREE(root,memb) if(root) { \
        rb_node_t *n = (root)->rb_node; \
        while (n->rb_left) n = n->rb_left; \
        while (n) { \
                struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
                printk(" %x",cl->classid); htb_next_rb_node (&n); \
        } }

static void htb_debug_dump (struct htb_sched *q)
{
        int i,p;
        printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
        /* rows */
        for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
                printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
                for (p=0;p<TC_HTB_NUMPRIO;p++) {
                        if (!q->row[i][p].rb_node) continue;
                        printk(" p%d:",p);
                        HTB_DUMTREE(q->row[i]+p,node[p]);
                }
                printk("\n");
        }
        /* classes */
        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *l;
                list_for_each (l,q->hash+i) {
                        struct htb_class *cl = list_entry(l,struct htb_class,hlist);
                        long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
                        printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
                                        "pa=%x f:",
                                cl->classid,cl->cmode,cl->tokens,cl->ctokens,
                                cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
                                cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
                        if (cl->level)
                        for (p=0;p<TC_HTB_NUMPRIO;p++) {
                                if (!cl->un.inner.feed[p].rb_node) continue;
                                printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
                                HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
                        }
                        printk("\n");
                }
        }
}
#endif
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree (HTB_ARGQ rb_root_t *root,
                struct htb_class *cl,int prio)
{
        rb_node_t **p = &root->rb_node, *parent = NULL;
        HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
#ifdef HTB_DEBUG
        if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
        HTB_CHCL(cl);
        if (*p) {
                struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
                HTB_CHCL(x);
        }
#endif
        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);
                HTB_CHCL(c);
                if (cl->classid > c->classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree (struct htb_sched *q,
                struct htb_class *cl,long delay,int debug_hint)
{
        rb_node_t **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
        HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
#ifdef HTB_DEBUG
        if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
        HTB_CHCL(cl);
        if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
                printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
#endif
        cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
        if (cl->pq_key == q->jiffies)
                cl->pq_key++;

        /* update the nearest event cache */
        if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (time_after_eq(cl->pq_key, c->pq_key))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
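/* An illustrative sketch (kept out of the build with #if 0) of the pq_key
 * computation above; the numbers are made up and the exact us-to-jiffies
 * conversion depends on the configured PSCHED clock source.
 */
#if 0
static void pq_key_example(void)
{
        /* with HZ == 100 a delay of 30000 us is roughly 3 jiffies, so
           with q->jiffies == 1000 the class is keyed near pq_key == 1003
           in wait_pq, and near_ev_cache[level] is pulled down to that
           value if it currently points at a later event */
}
#endif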

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static void htb_next_rb_node(rb_node_t **n)
{
        rb_node_t *p;
        if ((*n)->rb_right) {
                /* child at right. use it or its leftmost descendant */
                *n = (*n)->rb_right;
                while ((*n)->rb_left)
                        *n = (*n)->rb_left;
                return;
        }
        while ((p = (*n)->rb_parent) != NULL) {
                /* if we've arrived from left child then we have next node */
                if (p->rb_left == *n) break;
                *n = p;
        }
        *n = p;
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                struct htb_class *cl,int mask)
{
        HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
                        cl->classid,mask,q->row_mask[cl->level]);
        HTB_CHCL(cl);
        q->row_mask[cl->level] |= mask;
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
        }
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
                struct htb_class *cl,int mask)
{
        int m = 0;
        HTB_CHCL(cl);
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                if (q->ptr[cl->level][prio] == cl->node+prio)
                        htb_next_rb_node(q->ptr[cl->level]+prio);
                htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
                if (!q->row[cl->level][prio].rb_node)
                        m |= 1 << prio;
        }
        HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
                        cl->classid,mask,q->row_mask[cl->level],m);
        q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m,mask = cl->prio_activity;
        HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
        HTB_CHCL(cl);

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                HTB_CHCL(p);
                m = mask; while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use, so
                                   reset the bit in mask as parent is ok */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
                }
                HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                p->classid,p->prio_activity,mask,p->cmode);
                p->prio_activity |= mask;
                cl = p; p = cl->parent;
                HTB_CHCL(cl);
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q,cl,mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m,mask = cl->prio_activity;
        HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
        HTB_CHCL(cl);

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask; mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.ptr[prio] == cl->node+prio)
                                htb_next_rb_node(p->un.inner.ptr + prio);

                        htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);

                        if (!p->un.inner.feed[prio].rb_node)
                                mask |= 1 << prio;
                }
                HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                p->classid,p->prio_activity,mask,p->cmode);
                p->prio_activity &= ~mask;
                cl = p; p = cl->parent;
                HTB_CHCL(cl);
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q,cl,mask);
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * It is also worth noting that class mode doesn't change simply at
 * cl->{c,}tokens == 0; there can rather be a hysteresis over the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static __inline__ enum htb_cmode
htb_class_mode(struct htb_class *cl,long *diff)
{
    long toks;

    if ((toks = (cl->ctokens + *diff)) < (
#if HTB_HYSTERESIS
            cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
#endif
            0)) {
            *diff = -toks;
            return HTB_CANT_SEND;
    }
    if ((toks = (cl->tokens + *diff)) >= (
#if HTB_HYSTERESIS
            cl->cmode == HTB_CAN_SEND ? -cl->buffer :
#endif
            0))
            return HTB_CAN_SEND;

    *diff = -toks;
    return HTB_MAY_BORROW;
}
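/* A worked sketch (kept out of the build with #if 0) of the hysteresis
 * above, with made-up numbers; only the thresholds match the code.
 */
#if 0
static void hysteresis_example(void)
{
        long cbuffer = 2000, toks;
        /* while the class is not yet HTB_CANT_SEND it is tested against
           -cbuffer: */
        toks = -1500;   /* -1500 >= -2000, so the class may still send */
        /* once it has fallen below -2000 and entered HTB_CANT_SEND, the
           threshold moves to 0: it must refill all the way to toks >= 0
           before it can leave that mode. The gap between the two
           thresholds is what prevents rapid mode flapping. */
}
#endif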

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed list linkage, change
 * the mode and add the class to the wait event queue if appropriate.
 * The new mode should be different from the old one and cl->pq_key has
 * to be valid if changing to a mode other than HTB_CAN_SEND
 * (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl,diff);

        HTB_CHCL(cl);
        HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);

        if (new_mode == cl->cmode)
                return;

        if (cl->prio_activity) { /* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q,cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q,cl);
        } else
                cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
{
        BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
        HTB_CHCL(cl);
        if (!cl->prio_activity) {
                cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
                htb_activate_prios(q,cl);
                list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
        }
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static __inline__ void
htb_deactivate(struct htb_sched *q,struct htb_class *cl)
{
        BUG_TRAP(cl->prio_activity);
        HTB_CHCL(cl);
        htb_deactivate_prios(q,cl);
        cl->prio_activity = 0;
        list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = htb_classify(skb,sch);

    if (cl == HTB_DIRECT || !cl) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen && cl) {
            __skb_queue_tail(&q->direct_queue, skb);
            q->direct_pkts++;
        } else {
            kfree_skb (skb);
            sch->stats.drops++;
            return NET_XMIT_DROP;
        }
    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->stats.drops++;
        cl->stats.drops++;
        return NET_XMIT_DROP;
    } else {
        cl->stats.packets++; cl->stats.bytes += skb->len;
        htb_activate (q,cl);
    }

    sch->q.qlen++;
    sch->stats.packets++; sch->stats.bytes += skb->len;
    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = (struct htb_sched *)sch->data;
    struct htb_class *cl = htb_classify(skb,sch);
    struct sk_buff *tskb;

    if (cl == HTB_DIRECT || !cl) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen && cl) {
            __skb_queue_head(&q->direct_queue, skb);
        } else {
            __skb_queue_head(&q->direct_queue, skb);
            tskb = __skb_dequeue_tail(&q->direct_queue);
            kfree_skb (tskb);
            sch->stats.drops++;
            return NET_XMIT_CN;
        }
    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->stats.drops++;
        cl->stats.drops++;
        return NET_XMIT_DROP;
    } else
            htb_activate (q,cl);

    sch->q.qlen++;
    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}

static void htb_timer(unsigned long arg)
{
    struct Qdisc *sch = (struct Qdisc*)arg;
    sch->flags &= ~TCQ_F_THROTTLED;
    wmb();
    netif_schedule(sch->dev);
}

#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
static void htb_rate_timer(unsigned long arg)
{
        struct Qdisc *sch = (struct Qdisc*)arg;
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct list_head *p;

        /* lock queue so that we can muck with it */
        HTB_QLOCK(sch);
        HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);

        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);

        /* scan and recompute one bucket at time */
        if (++q->recmp_bucket >= HTB_HSIZE)
                q->recmp_bucket = 0;
        list_for_each (p,q->hash+q->recmp_bucket) {
                struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
                                cl->classid,cl->sum_bytes,cl->sum_packets);
                RT_GEN (cl->sum_bytes,cl->rate_bytes);
                RT_GEN (cl->sum_packets,cl->rate_packets);
        }
        HTB_QUNLOCK(sch);
}
#endif
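/* A worked sketch (kept out of the build with #if 0) of the RT_GEN
 * averaging. Each class is visited once per HTB_HSIZE (16) seconds, and
 * with HTB_EWMAC == 2 the recurrence R += D - R/2 converges to R == 2*D
 * for a steady accumulator D; that is why htb_dump_class later divides
 * by HTB_EWMAC*HTB_HSIZE to report bytes per second. Numbers are made up.
 */
#if 0
static void rt_gen_example(void)
{
        unsigned long R = 0, D = 16000; /* 16000 bytes gathered in 16 s */
        R += D - R/2;                   /* 16000 */
        R += D - R/2;                   /* 24000 */
        R += D - R/2;                   /* 28000 ... converges to 32000 */
        /* reported rate: R/(HTB_EWMAC*HTB_HSIZE) -> 32000/32 == 1000
           bytes/sec at steady state, i.e. D spread over 16 seconds */
}
#endif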

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such a case we remove the class from the event queue first.
 */
static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
                int level,int bytes)
{
        long toks,diff;
        enum htb_cmode old_mode;
        HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
        if (toks > cl->B) toks = cl->B; \
        toks -= L2T(cl, cl->R, bytes); \
        if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
        cl->T = toks

        while (cl) {
                HTB_CHCL(cl);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                if (cl->level >= level) {
                        if (cl->level == level) cl->xstats.lends++;
                        HTB_ACCNT (tokens,buffer,rate);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff; /* we moved t_c; update tokens */
                }
                HTB_ACCNT (ctokens,cbuffer,ceil);
                cl->t_c = q->now;
                HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);

                old_mode = cl->cmode; diff = 0;
                htb_change_class_mode(q,cl,&diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree (q,cl,diff,1);
                }

#ifdef HTB_RATECM
                /* update rate counters */
                cl->sum_bytes += bytes; cl->sum_packets++;
#endif

                /* update byte stats except for leaves which are already updated */
                if (cl->level) {
                        cl->stats.bytes += bytes;
                        cl->stats.packets++;
                }
                cl = cl->parent;
        }
}
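/* A worked sketch (kept out of the build with #if 0) of one HTB_ACCNT
 * step from the macro above, with made-up numbers: refill by the elapsed
 * time, clamp to the bucket depth, then pay for the packet.
 */
#if 0
static void htb_accnt_example(void)
{
        long tokens = 500, buffer = 1000, diff = 800, toks;
        /* suppose L2T(cl, rate, bytes) costs 600 ticks for this packet */
        toks = diff + tokens;               /* refill: 1300 */
        if (toks > buffer) toks = buffer;   /* clamp to bucket depth: 1000 */
        toks -= 600;                        /* charge the packet: 400 */
        /* tokens becomes 400; had it fallen to -mbuffer or below, the
           macro would clamp it to 1-mbuffer instead */
}
#endif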

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns jiffies to
 * next pending event (0 for no event in pq).
 * Note: applied are those events with cl->pq_key <= q->jiffies.
 */
static long htb_do_events(struct htb_sched *q,int level)
{
        int i;
        HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
                        level,q->wait_pq[level].rb_node,q->row_mask[level]);
        for (i = 0; i < 500; i++) {
                struct htb_class *cl;
                long diff;
                rb_node_t *p = q->wait_pq[level].rb_node;
                if (!p) return 0;
                while (p->rb_left) p = p->rb_left;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (time_after(cl->pq_key, q->jiffies)) {
                        HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
                        return cl->pq_key - q->jiffies;
                }
                htb_safe_rb_erase(p,q->wait_pq+level);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                htb_change_class_mode(q,cl,&diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree (q,cl,diff,2);
        }
        if (net_ratelimit())
                printk(KERN_WARNING "htb: too many events !\n");
        return HZ/10;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points.
 */
static struct htb_class *
htb_lookup_leaf(rb_root_t *tree,int prio,rb_node_t **pptr)
{
        int i;
        struct {
                rb_node_t *root;
                rb_node_t **pptr;
        } stk[TC_HTB_MAXDEPTH],*sp = stk;

        BUG_TRAP(tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;

        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr) { /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
                                htb_next_rb_node (sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
                        HTB_CHCL(cl);
                        if (!cl->level)
                                return cl;
                        (++sp)->root = cl->un.inner.feed[prio].rb_node;
                        sp->pptr = cl->un.inner.ptr+prio;
                }
        }
        BUG_TRAP(0);
        return NULL;
}

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *
htb_dequeue_tree(struct htb_sched *q,int prio,int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl,*start;
        /* look initial class up in the row */
        start = cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);

        do {
next:
                BUG_TRAP(cl);
                if (!cl) return NULL;
                HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
                                prio,level,cl->classid,cl->un.leaf.deficit[level]);

                /* class can be empty - it is unlikely but can be true if leaf
                   qdisc drops packets in enqueue routine or if someone used
                   graft operation on the leaf since last dequeue;
                   simply deactivate and skip such class */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q,cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf (q->row[level]+prio,
                                        prio,q->ptr[level]+prio);
                        if (cl == start) /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
                        break;
                if (!cl->warned) {
                        printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
                        cl->warned = 1;
                }
                q->nwc_hit++;
                htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                cl = htb_lookup_leaf (q->row[level]+prio,prio,q->ptr[level]+prio);
        } while (cl != start);

        if (likely(skb != NULL)) {
                if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                        HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
                                level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
                        cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
                        htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                }
                /* this used to be after charge_class but this constellation
                   gives us slightly better performance */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate (q,cl);
                htb_charge_class (q,cl,level,skb->len);
        }
        return skb;
}
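/* A worked sketch (kept out of the build with #if 0) of the DRR deficit
 * accounting above, with a made-up quantum and starting deficit.
 */
#if 0
static void drr_deficit_example(void)
{
        int deficit = 200, quantum = 1500;
        /* dequeue a 1400-byte packet: */
        deficit -= 1400;        /* -1200 < 0: this class's turn is spent */
        deficit += quantum;     /* 300, and htb_next_rb_node advances the
                                   round-robin pointer to the next class */
        /* a 100-byte packet instead would leave deficit == 100 >= 0 and
           the same class would keep the pointer for the next dequeue */
}
#endif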

static void htb_delay_by(struct Qdisc *sch,long delay)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        if (netif_queue_stopped(sch->dev)) return;
        if (delay <= 0) delay = 1;
        if (unlikely(delay > 5*HZ)) {
                if (net_ratelimit())
                        printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
                delay = 5*HZ;
        }
        /* why not use jiffies here? because expires can be in the past */
        mod_timer(&q->timer, q->jiffies + delay);
        sch->flags |= TCQ_F_THROTTLED;
        sch->stats.overlimits++;
        HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = NULL;
        struct htb_sched *q = (struct htb_sched *)sch->data;
        int level;
        long min_delay;
#ifdef HTB_DEBUG
        int evs_used = 0;
#endif

        q->jiffies = jiffies;
        HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
                        sch->q.qlen);

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
                sch->flags &= ~TCQ_F_THROTTLED;
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen) goto fin;
        PSCHED_GET_TIME(q->now);

        min_delay = LONG_MAX;
        q->nwc_hit = 0;
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                long delay;
                if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
                        delay = htb_do_events(q,level);
                        q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
#ifdef HTB_DEBUG
                        evs_used++;
#endif
                } else
                        delay = q->near_ev_cache[level] - q->jiffies;

                if (delay && min_delay > delay)
                        min_delay = delay;
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz (m);
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q,prio,level);
                        if (likely(skb != NULL)) {
                                sch->q.qlen--;
                                sch->flags &= ~TCQ_F_THROTTLED;
                                goto fin;
                        }
                }
        }
#ifdef HTB_DEBUG
        if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
                if (min_delay == LONG_MAX) {
                        printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
                                        evs_used,q->jiffies,jiffies);
                        htb_debug_dump(q);
                } else
                        printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
                                        "too small rate\n",min_delay);
        }
#endif
        htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
fin:
        HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
        return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc* sch)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        int prio;

        for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                struct list_head *p;
                list_for_each (p,q->drops+prio) {
                        struct htb_class *cl = list_entry(p, struct htb_class,
                                                          un.leaf.drop_list);
                        unsigned int len;
                        if (cl->un.leaf.q->ops->drop &&
                                (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                sch->q.qlen--;
                                if (!cl->un.leaf.q->q.qlen)
                                        htb_deactivate (q,cl);
                                return len;
                        }
                }
        }
        return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        int i;
        HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);

        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *p;
                list_for_each (p,q->hash+i) {
                        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                        if (cl->level)
                                memset(&cl->un.inner,0,sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
#ifdef HTB_DEBUG
                        cl->pq_node.rb_color = -1;
                        memset(cl->node,255,sizeof(cl->node));
#endif

                }
        }
        sch->flags &= ~TCQ_F_THROTTLED;
        del_timer(&q->timer);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        memset(q->row,0,sizeof(q->row));
        memset(q->row_mask,0,sizeof(q->row_mask));
        memset(q->wait_pq,0,sizeof(q->wait_pq));
        memset(q->ptr,0,sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);
}

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
        struct htb_sched *q = (struct htb_sched*)sch->data;
        struct rtattr *tb[TCA_HTB_INIT];
        struct tc_htb_glob *gopt;
        int i;
#ifdef HTB_DEBUG
        printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
                          HTB_VER >> 16,HTB_VER & 0xffff);
#endif
        if (!opt || rtattr_parse(tb, TCA_HTB_INIT, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
                        tb[TCA_HTB_INIT-1] == NULL ||
                        RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
                printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
        if (gopt->version != HTB_VER >> 16) {
                printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                                HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
                return -EINVAL;
        }
        memset(q,0,sizeof(*q));
        q->debug = gopt->debug;
        HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);

        INIT_LIST_HEAD(&q->root);
        for (i = 0; i < HTB_HSIZE; i++)
                INIT_LIST_HEAD(q->hash+i);
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);

        init_timer(&q->timer);
        skb_queue_head_init(&q->direct_queue);

        q->direct_qlen = sch->dev->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;
        q->timer.function = htb_timer;
        q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
        init_timer(&q->rttim);
        q->rttim.function = htb_rate_timer;
        q->rttim.data = (unsigned long)sch;
        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);
#endif
        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        MOD_INC_USE_COUNT;
        return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct htb_sched *q = (struct htb_sched*)sch->data;
        unsigned char    *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_glob gopt;
        HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
        /* stats */
        HTB_QLOCK(sch);
        gopt.direct_pkts = q->direct_pkts;

#ifdef HTB_DEBUG
        if (HTB_DBG_COND(0,2))
                htb_debug_dump(q);
#endif
        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
        gopt.defcls = q->defcls;
        gopt.debug = q->debug;
        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
        rta->rta_len = skb->tail - b;
        sch->stats.qlen = sch->q.qlen;
        RTA_PUT(skb, TCA_STATS, sizeof(sch->stats), &sch->stats);
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        HTB_QUNLOCK(sch);
        skb_trim(skb, skb->tail - skb->data);
        return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
        struct htb_sched *q = (struct htb_sched*)sch->data;
#endif
        struct htb_class *cl = (struct htb_class*)arg;
        unsigned char    *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_opt opt;

        HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);

        HTB_QLOCK(sch);
        tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (!cl->level && cl->un.leaf.q) {
                tcm->tcm_info = cl->un.leaf.q->handle;
                cl->stats.qlen = cl->un.leaf.q->q.qlen;
        }

        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        memset (&opt,0,sizeof(opt));

        opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
        opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
        opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
        opt.level = cl->level;
        RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
        rta->rta_len = skb->tail - b;

#ifdef HTB_RATECM
        cl->stats.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
        cl->stats.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
#endif

        cl->xstats.tokens = cl->tokens;
        cl->xstats.ctokens = cl->ctokens;
        RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
        RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        HTB_QUNLOCK(sch);
        skb_trim(skb, b - skb->data);
        return -1;
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
        struct Qdisc **old)
{
        struct htb_class *cl = (struct htb_class*)arg;

        if (cl && !cl->level) {
                if (new == NULL && (new = qdisc_create_dflt(sch->dev,
                                        &pfifo_qdisc_ops)) == NULL)
                                        return -ENOBUFS;
                sch_tree_lock(sch);
                if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
                        if (cl->prio_activity)
                                htb_deactivate ((struct htb_sched*)sch->data,cl);

                        /* TODO: is it correct ? Why doesn't CBQ do it ? */
                        sch->q.qlen -= (*old)->q.qlen;
                        qdisc_reset(*old);
                }
                sch_tree_unlock(sch);
                return 0;
        }
        return -ENOENT;
}

static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class*)arg;
        return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
#ifdef HTB_DEBUG
        struct htb_sched *q = (struct htb_sched *)sch->data;
#endif
        struct htb_class *cl = htb_find(classid,sch);
        HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
        if (cl)
                cl->refcnt++;
        return (unsigned long)cl;
}

static void htb_destroy_filters(struct tcf_proto **fl)
{
        struct tcf_proto *tp;

        while ((tp = *fl) != NULL) {
                *fl = tp->next;
                tcf_destroy(tp);
        }
}

static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
        if (!cl->level) {
                BUG_TRAP(cl->un.leaf.q);
                sch->q.qlen -= cl->un.leaf.q->q.qlen;
                qdisc_destroy(cl->un.leaf.q);
        }
        qdisc_put_rtab(cl->rate);
        qdisc_put_rtab(cl->ceil);

#ifdef CONFIG_NET_ESTIMATOR
        qdisc_kill_estimator(&cl->stats);
#endif
        htb_destroy_filters (&cl->filter_list);

        while (!list_empty(&cl->children))
                htb_destroy_class (sch,list_entry(cl->children.next,
                                        struct htb_class,sibling));

        /* note: this delete may happen twice (see htb_delete) */
        list_del(&cl->hlist);
        list_del(&cl->sibling);

        if (cl->prio_activity)
                htb_deactivate (q,cl);

        if (cl->cmode != HTB_CAN_SEND)
                htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);

        kfree(cl);
}

/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc* sch)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        HTB_DBG(0,1,"htb_destroy q=%p\n",q);

        del_timer_sync (&q->timer);
#ifdef HTB_RATECM
        del_timer_sync (&q->rttim);
#endif
        /* This line used to be after the htb_destroy_class call below,
           and surprisingly it worked in 2.4. But it must precede it,
           because filters need their target class alive to be able to
           call unbind_filter on it (without an Oops). */
        htb_destroy_filters(&q->filter_list);

        while (!list_empty(&q->root))
                htb_destroy_class (sch,list_entry(q->root.next,
                                        struct htb_class,sibling));

        __skb_queue_purge(&q->direct_queue);
        MOD_DEC_USE_COUNT;
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct htb_class *cl = (struct htb_class*)arg;
        HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

        // TODO: why not allow deleting a subtree? References? Does the
        // tc subsystem guarantee that in htb_destroy it holds no class
        // refs, so that we can remove children safely there?
        if (!list_empty(&cl->children) || cl->filter_cnt)
                return -EBUSY;

        sch_tree_lock(sch);

        /* delete from hash and active; remainder in destroy_class */
        list_del_init(&cl->hlist);
        if (cl->prio_activity)
                htb_deactivate (q,cl);

        if (--cl->refcnt == 0)
                htb_destroy_class(sch,cl);

        sch_tree_unlock(sch);
        return 0;
}
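
/* Illustrative example: "tc class del dev eth0 classid 1:10" lands here;
   it fails with EBUSY while the class still has children or bound
   filters, otherwise the class is unhashed and freed once its refcount
   reaches zero. */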

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
#ifdef HTB_DEBUG
        struct htb_sched *q = (struct htb_sched *)sch->data;
#endif
        struct htb_class *cl = (struct htb_class*)arg;
        HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);

        if (--cl->refcnt == 0)
                htb_destroy_class(sch,cl);
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
                u32 parentid, struct rtattr **tca, unsigned long *arg)
{
        int err = -EINVAL;
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct htb_class *cl = (struct htb_class*)*arg,*parent;
        struct rtattr *opt = tca[TCA_OPTIONS-1];
        struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
        struct rtattr *tb[TCA_HTB_RTAB];
        struct tc_htb_opt *hopt;

        /* extract all subattrs from opt attr */
        if (!opt || rtattr_parse(tb, TCA_HTB_RTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
                        tb[TCA_HTB_PARMS-1] == NULL ||
                        RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
                goto failure;

        parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);

        hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
        HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
        rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
        ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
        if (!rtab || !ctab) goto failure;

        if (!cl) { /* new class */
                struct Qdisc *new_q;
                /* check for valid classid */
                if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
                        goto failure;

                /* check maximal depth */
                if (parent && parent->parent && parent->parent->level < 2) {
                        printk(KERN_ERR "htb: tree is too deep\n");
                        goto failure;
                }
                err = -ENOBUFS;
                if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
                        goto failure;

                memset(cl, 0, sizeof(*cl));
                cl->refcnt = 1;
                INIT_LIST_HEAD(&cl->sibling);
                INIT_LIST_HEAD(&cl->hlist);
                INIT_LIST_HEAD(&cl->children);
                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
#ifdef HTB_DEBUG
                cl->magic = HTB_CMAGIC;
#endif

                /* create the leaf qdisc early, because it uses kmalloc(GFP_KERNEL)
                   and so can't be called inside sch_tree_lock
                   -- thanks to Karlis Peisenieks */
                new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
                        /* turn parent into inner node */
                        sch->q.qlen -= parent->un.leaf.q->q.qlen;
                        qdisc_destroy (parent->un.leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate (q,parent);

                        /* remove from evt list because of level change */
                        if (parent->cmode != HTB_CAN_SEND) {
                                htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
                                parent->cmode = HTB_CAN_SEND;
                        }
                        parent->level = (parent->parent ? parent->parent->level
                                        : TC_HTB_MAXDEPTH) - 1;
                        memset (&parent->un.inner,0,sizeof(parent->un.inner));
                }
                /* leaf (we) needs an elementary qdisc */
                cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

                cl->classid = classid; cl->parent = parent;

                /* set class to be in HTB_CAN_SEND state */
                cl->tokens = hopt->buffer;
                cl->ctokens = hopt->cbuffer;
                cl->mbuffer = 60000000; /* 1min */
                PSCHED_GET_TIME(cl->t_c);
                cl->cmode = HTB_CAN_SEND;

                /* attach to the hash list and parent's family */
                list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
                list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
#ifdef HTB_DEBUG
                {
                        int i;
                        for (i = 0; i < TC_HTB_NUMPRIO; i++) cl->node[i].rb_color = -1;
                        cl->pq_node.rb_color = -1;
                }
#endif
        } else sch_tree_lock(sch);

        /* there used to be a nasty bug here: we must check that the node
           is really a leaf before changing cl->un.leaf! */
        if (!cl->level) {
                cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
                if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
                        printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
                        cl->un.leaf.quantum = 1000;
                }
                if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
                        printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
                        cl->un.leaf.quantum = 200000;
                }
                if (hopt->quantum)
                        cl->un.leaf.quantum = hopt->quantum;
                if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
                        cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
        }
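        /* Worked example (illustrative): with r2q = 10 and a class rate
           of 60000 B/s (480 kbit/s), quantum = 60000/10 = 6000 bytes,
           comfortably inside the [1000, 200000] clamp applied above. */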

        cl->buffer = hopt->buffer;
        cl->cbuffer = hopt->cbuffer;
        if (cl->rate)
                qdisc_put_rtab(cl->rate);
        cl->rate = rtab;
        if (cl->ceil)
                qdisc_put_rtab(cl->ceil);
        cl->ceil = ctab;
        sch_tree_unlock(sch);

        *arg = (unsigned long)cl;
        return 0;

failure:
        if (rtab) qdisc_put_rtab(rtab);
        if (ctab) qdisc_put_rtab(ctab);
        return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct htb_class *cl = (struct htb_class *)arg;
        struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
        HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
        return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
        u32 classid)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct htb_class *cl = htb_find (classid,sch);
        HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
        /*if (cl && !cl->level) return 0;
          The line above used to be there to prevent attaching filters to
          leaves. But at least the tc_index filter uses this just to get the
          class for other reasons, so we have to allow it.
          ----
          19.6.2002: As Werner explained, it is OK - bind_filter is just
          another way to "lock" the class - unlike "get", this lock can
          be broken by the class during destroy, IIUC.
         */
        if (cl)
                cl->filter_cnt++;
        else
                q->filter_cnt++;
        return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        struct htb_class *cl = (struct htb_class *)arg;
        HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
        if (cl)
                cl->filter_cnt--;
        else
                q->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct htb_sched *q = (struct htb_sched *)sch->data;
        int i;

        if (arg->stop)
                return;

        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *p;
                list_for_each (p,q->hash+i) {
                        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}
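
/* Note: htb_walk backs class dumps such as "tc class show dev eth0":
   the dump path calls it to visit every class in the hash table,
   honouring the skip/count cursor in struct qdisc_walker. */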

static struct Qdisc_class_ops htb_class_ops =
{
    htb_graft,
    htb_leaf,
    htb_get,
    htb_put,
    htb_change_class,
    htb_delete,
    htb_walk,

    htb_find_tcf,
    htb_bind_filter,
    htb_unbind_filter,

    htb_dump_class,
};

struct Qdisc_ops htb_qdisc_ops =
{
    NULL,
    &htb_class_ops,
    "htb",
    sizeof(struct htb_sched),

    htb_enqueue,
    htb_dequeue,
    htb_requeue,
    htb_drop,

    htb_init,
    htb_reset,
    htb_destroy,
    NULL /* htb_change */,

    htb_dump,
};
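
/* A typical HTB setup from userspace (illustrative; device name, rates
   and classids are hypothetical):

        tc qdisc add dev eth0 root handle 1: htb default 20
        tc class add dev eth0 parent 1:  classid 1:1  htb rate 800kbit ceil 800kbit
        tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 800kbit
        tc class add dev eth0 parent 1:1 classid 1:20 htb rate 200kbit ceil 800kbit

   Unclassified traffic goes to class 1:20 ("default 20"); either leaf
   may borrow from 1:1 up to its 800kbit ceil while the other is idle. */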

#ifdef MODULE
int init_module(void)
{
    return register_qdisc(&htb_qdisc_ops);
}

void cleanup_module(void)
{
    unregister_qdisc(&htb_qdisc_ops);
}
MODULE_LICENSE("GPL");
#endif
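
/* On a 2.4 kernel this is typically loaded with "insmod sch_htb.o", or
   pulled in automatically by modprobe on the first "tc qdisc add ... htb"
   if module autoloading is configured. */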
