or1k_soc_on_altera_embedded_dev_kit (https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk)
tags/linux-2.6/linux-2.6.24_or32_unified_v2.3/net/sched/sch_hfsc.c (rev 8)
/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   the inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc
{
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
        u64     x;      /* current starting position on x-axis */
        u64     y;      /* current starting position on y-axis */
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
        HFSC_RSC = 0x1,
        HFSC_FSC = 0x2,
        HFSC_USC = 0x4
};

struct hfsc_class
{
        u32             classid;        /* class id */
        unsigned int    refcnt;         /* usage count */

        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        unsigned int    level;          /* class level in hierarchy */
        struct tcf_proto *filter_list;  /* filter list */
        unsigned int    filter_cnt;     /* filter count */

        struct hfsc_sched *sched;       /* scheduler data */
        struct hfsc_class *cl_parent;   /* parent class */
        struct list_head siblings;      /* sibling classes */
        struct list_head children;      /* child classes */
        struct Qdisc    *qdisc;         /* leaf qdisc */

        struct rb_node el_node;         /* qdisc's eligible tree member */
        struct rb_root vt_tree;         /* active children sorted by cl_vt */
        struct rb_node vt_node;         /* parent's vt_tree member */
        struct rb_root cf_tree;         /* active children sorted by cl_f */
        struct rb_node cf_node;         /* parent's cf_heap member */
        struct list_head hlist;         /* hash list member */
        struct list_head dlist;         /* drop list member */

        u64     cl_total;               /* total work in bytes */
        u64     cl_cumul;               /* cumulative work in bytes done by
                                           real-time criteria */

        u64     cl_d;                   /* deadline*/
        u64     cl_e;                   /* eligible time */
        u64     cl_vt;                  /* virtual time */
        u64     cl_f;                   /* time when this class will fit for
                                           link-sharing, max(myf, cfmin) */
        u64     cl_myf;                 /* my fit-time (calculated from this
                                           class's own upperlimit curve) */
        u64     cl_myfadj;              /* my fit-time adjustment (to cancel
                                           history dependence) */
        u64     cl_cfmin;               /* earliest children's fit-time (used
                                           with cl_myf to obtain cl_f) */
        u64     cl_cvtmin;              /* minimal virtual time among the
                                           children fit for link-sharing
                                           (monotonic within a period) */
        u64     cl_vtadj;               /* intra-period cumulative vt
                                           adjustment */
        u64     cl_vtoff;               /* inter-period cumulative vt offset */
        u64     cl_cvtmax;              /* max child's vt in the last period */
        u64     cl_cvtoff;              /* cumulative cvtmax of all periods */
        u64     cl_pcvtoff;             /* parent's cvtoff at initialization
                                           time */

        struct internal_sc cl_rsc;      /* internal real-time service curve */
        struct internal_sc cl_fsc;      /* internal fair service curve */
        struct internal_sc cl_usc;      /* internal upperlimit service curve */
        struct runtime_sc cl_deadline;  /* deadline curve */
        struct runtime_sc cl_eligible;  /* eligible curve */
        struct runtime_sc cl_virtual;   /* virtual curve */
        struct runtime_sc cl_ulimit;    /* upperlimit curve */

        unsigned long   cl_flags;       /* which curves are valid */
        unsigned long   cl_vtperiod;    /* vt period sequence number */
        unsigned long   cl_parentperiod;/* parent's vt period sequence number*/
        unsigned long   cl_nactive;     /* number of active children */
};

#define HFSC_HSIZE      16

struct hfsc_sched
{
        u16     defcls;                         /* default class id */
        struct hfsc_class root;                 /* root class */
        struct list_head clhash[HFSC_HSIZE];    /* class hash */
        struct rb_root eligible;                /* eligible tree */
        struct list_head droplist;              /* active leaf class list (for
                                                   dropping) */
        struct sk_buff_head requeue;            /* requeued packet */
        struct qdisc_watchdog watchdog;         /* watchdog timer */
};

#define HT_INFINITY     0xffffffffffffffffULL   /* infinite time value */


/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
201
 
202
static void
203
eltree_insert(struct hfsc_class *cl)
204
{
205
        struct rb_node **p = &cl->sched->eligible.rb_node;
206
        struct rb_node *parent = NULL;
207
        struct hfsc_class *cl1;
208
 
209
        while (*p != NULL) {
210
                parent = *p;
211
                cl1 = rb_entry(parent, struct hfsc_class, el_node);
212
                if (cl->cl_e >= cl1->cl_e)
213
                        p = &parent->rb_right;
214
                else
215
                        p = &parent->rb_left;
216
        }
217
        rb_link_node(&cl->el_node, parent, p);
218
        rb_insert_color(&cl->el_node, &cl->sched->eligible);
219
}
220
 
221
static inline void
222
eltree_remove(struct hfsc_class *cl)
223
{
224
        rb_erase(&cl->el_node, &cl->sched->eligible);
225
}
226
 
227
static inline void
228
eltree_update(struct hfsc_class *cl)
229
{
230
        eltree_remove(cl);
231
        eltree_insert(cl);
232
}
233
 
234
/* find the class with the minimum deadline among the eligible classes */
235
static inline struct hfsc_class *
236
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
237
{
238
        struct hfsc_class *p, *cl = NULL;
239
        struct rb_node *n;
240
 
241
        for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
242
                p = rb_entry(n, struct hfsc_class, el_node);
243
                if (p->cl_e > cur_time)
244
                        break;
245
                if (cl == NULL || p->cl_d < cl->cl_d)
246
                        cl = p;
247
        }
248
        return cl;
249
}
250
 
251
/* find the class with minimum eligible time among the eligible classes */
252
static inline struct hfsc_class *
253
eltree_get_minel(struct hfsc_sched *q)
254
{
255
        struct rb_node *n;
256
 
257
        n = rb_first(&q->eligible);
258
        if (n == NULL)
259
                return NULL;
260
        return rb_entry(n, struct hfsc_class, el_node);
261
}
262
 
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
267
static void
268
vttree_insert(struct hfsc_class *cl)
269
{
270
        struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
271
        struct rb_node *parent = NULL;
272
        struct hfsc_class *cl1;
273
 
274
        while (*p != NULL) {
275
                parent = *p;
276
                cl1 = rb_entry(parent, struct hfsc_class, vt_node);
277
                if (cl->cl_vt >= cl1->cl_vt)
278
                        p = &parent->rb_right;
279
                else
280
                        p = &parent->rb_left;
281
        }
282
        rb_link_node(&cl->vt_node, parent, p);
283
        rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
284
}
285
 
286
static inline void
287
vttree_remove(struct hfsc_class *cl)
288
{
289
        rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
290
}
291
 
292
static inline void
293
vttree_update(struct hfsc_class *cl)
294
{
295
        vttree_remove(cl);
296
        vttree_insert(cl);
297
}
298
 
299
static inline struct hfsc_class *
300
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
301
{
302
        struct hfsc_class *p;
303
        struct rb_node *n;
304
 
305
        for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
306
                p = rb_entry(n, struct hfsc_class, vt_node);
307
                if (p->cl_f <= cur_time)
308
                        return p;
309
        }
310
        return NULL;
311
}
312
 
313
/*
314
 * get the leaf class with the minimum vt in the hierarchy
315
 */
316
static struct hfsc_class *
317
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
318
{
319
        /* if root-class's cfmin is bigger than cur_time nothing to do */
320
        if (cl->cl_cfmin > cur_time)
321
                return NULL;
322
 
323
        while (cl->level > 0) {
324
                cl = vttree_firstfit(cl, cur_time);
325
                if (cl == NULL)
326
                        return NULL;
327
                /*
328
                 * update parent's cl_cvtmin.
329
                 */
330
                if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
331
                        cl->cl_parent->cl_cvtmin = cl->cl_vt;
332
        }
333
        return cl;
334
}
335
 
336
static void
337
cftree_insert(struct hfsc_class *cl)
338
{
339
        struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
340
        struct rb_node *parent = NULL;
341
        struct hfsc_class *cl1;
342
 
343
        while (*p != NULL) {
344
                parent = *p;
345
                cl1 = rb_entry(parent, struct hfsc_class, cf_node);
346
                if (cl->cl_f >= cl1->cl_f)
347
                        p = &parent->rb_right;
348
                else
349
                        p = &parent->rb_left;
350
        }
351
        rb_link_node(&cl->cf_node, parent, p);
352
        rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
353
}
354
 
355
static inline void
356
cftree_remove(struct hfsc_class *cl)
357
{
358
        rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
359
}
360
 
361
static inline void
362
cftree_update(struct hfsc_class *cl)
363
{
364
        cftree_remove(cl);
365
        cftree_insert(cl);
366
}
367
 
/*
 * service curve support functions
 *
 *  external service curve parameters
 *      m: bps
 *      d: us
 *  internal service curve parameters
 *      sm: (bytes/psched_us) << SM_SHIFT
 *      ism: (psched_us/byte) << ISM_SHIFT
 *      dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
 */
#define SM_SHIFT        20
#define ISM_SHIFT       18

#define SM_MASK         ((1ULL << SM_SHIFT) - 1)
#define ISM_MASK        ((1ULL << ISM_SHIFT) - 1)
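/*
 * Worked example for the table above: at 100Kbit/s the slope is
 * 12.8e-3 bytes per 1.024us tick, which would round down to zero in
 * plain integer arithmetic; scaled by 1 << SM_SHIFT it is stored as
 * about 13422.  Likewise, at 1Gbit/s the inverse slope of 0.0078125
 * ticks/byte becomes exactly 2048 after scaling by 1 << ISM_SHIFT.
 */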
396
 
397
static inline u64
398
seg_x2y(u64 x, u64 sm)
399
{
400
        u64 y;
401
 
402
        /*
403
         * compute
404
         *      y = x * sm >> SM_SHIFT
405
         * but divide it for the upper and lower bits to avoid overflow
406
         */
407
        y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
408
        return y;
409
}
410
 
411
static inline u64
412
seg_y2x(u64 y, u64 ism)
413
{
414
        u64 x;
415
 
416
        if (y == 0)
417
                x = 0;
418
        else if (ism == HT_INFINITY)
419
                x = HT_INFINITY;
420
        else {
421
                x = (y >> ISM_SHIFT) * ism
422
                    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
423
        }
424
        return x;
425
}
426
 
427
/* Convert m (bps) into sm (bytes/psched us) */
428
static u64
429
m2sm(u32 m)
430
{
431
        u64 sm;
432
 
433
        sm = ((u64)m << SM_SHIFT);
434
        sm += PSCHED_TICKS_PER_SEC - 1;
435
        do_div(sm, PSCHED_TICKS_PER_SEC);
436
        return sm;
437
}
438
 
439
/* convert m (bps) into ism (psched us/byte) */
440
static u64
441
m2ism(u32 m)
442
{
443
        u64 ism;
444
 
445
        if (m == 0)
446
                ism = HT_INFINITY;
447
        else {
448
                ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
449
                ism += m - 1;
450
                do_div(ism, m);
451
        }
452
        return ism;
453
}
454
 
455
/* convert d (us) into dx (psched us) */
456
static u64
457
d2dx(u32 d)
458
{
459
        u64 dx;
460
 
461
        dx = ((u64)d * PSCHED_TICKS_PER_SEC);
462
        dx += USEC_PER_SEC - 1;
463
        do_div(dx, USEC_PER_SEC);
464
        return dx;
465
}
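/*
 * With the 1.024us tick resolution noted above, d2dx() is roughly a
 * division by 1.024 (rounded up), and dx2d() below is its inverse.
 */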
466
 
467
/* convert sm (bytes/psched us) into m (bps) */
468
static u32
469
sm2m(u64 sm)
470
{
471
        u64 m;
472
 
473
        m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
474
        return (u32)m;
475
}
476
 
477
/* convert dx (psched us) into d (us) */
478
static u32
479
dx2d(u64 dx)
480
{
481
        u64 d;
482
 
483
        d = dx * USEC_PER_SEC;
484
        do_div(d, PSCHED_TICKS_PER_SEC);
485
        return (u32)d;
486
}
487
 
488
static void
489
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
490
{
491
        isc->sm1  = m2sm(sc->m1);
492
        isc->ism1 = m2ism(sc->m1);
493
        isc->dx   = d2dx(sc->d);
494
        isc->dy   = seg_x2y(isc->dx, isc->sm1);
495
        isc->sm2  = m2sm(sc->m2);
496
        isc->ism2 = m2ism(sc->m2);
497
}
498
 
499
/*
500
 * initialize the runtime service curve with the given internal
501
 * service curve starting at (x, y).
502
 */
503
static void
504
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
505
{
506
        rtsc->x    = x;
507
        rtsc->y    = y;
508
        rtsc->sm1  = isc->sm1;
509
        rtsc->ism1 = isc->ism1;
510
        rtsc->dx   = isc->dx;
511
        rtsc->dy   = isc->dy;
512
        rtsc->sm2  = isc->sm2;
513
        rtsc->ism2 = isc->ism2;
514
}
515
 
/*
 * calculate the x-projection of the runtime service curve by the
 * given y-projection value
 */
520
static u64
521
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
522
{
523
        u64 x;
524
 
525
        if (y < rtsc->y)
526
                x = rtsc->x;
527
        else if (y <= rtsc->y + rtsc->dy) {
528
                /* x belongs to the 1st segment */
529
                if (rtsc->dy == 0)
530
                        x = rtsc->x + rtsc->dx;
531
                else
532
                        x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
533
        } else {
534
                /* x belongs to the 2nd segment */
535
                x = rtsc->x + rtsc->dx
536
                    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
537
        }
538
        return x;
539
}
540
 
541
static u64
542
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
543
{
544
        u64 y;
545
 
546
        if (x <= rtsc->x)
547
                y = rtsc->y;
548
        else if (x <= rtsc->x + rtsc->dx)
549
                /* y belongs to the 1st segment */
550
                y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
551
        else
552
                /* y belongs to the 2nd segment */
553
                y = rtsc->y + rtsc->dy
554
                    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
555
        return y;
556
}
557
 
558
/*
559
 * update the runtime service curve by taking the minimum of the current
560
 * runtime service curve and the service curve starting at (x, y).
561
 */
562
static void
563
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
564
{
565
        u64 y1, y2, dx, dy;
566
        u32 dsm;
567
 
568
        if (isc->sm1 <= isc->sm2) {
569
                /* service curve is convex */
570
                y1 = rtsc_x2y(rtsc, x);
571
                if (y1 < y)
572
                        /* the current rtsc is smaller */
573
                        return;
574
                rtsc->x = x;
575
                rtsc->y = y;
576
                return;
577
        }
578
 
579
        /*
580
         * service curve is concave
581
         * compute the two y values of the current rtsc
582
         *      y1: at x
583
         *      y2: at (x + dx)
584
         */
585
        y1 = rtsc_x2y(rtsc, x);
586
        if (y1 <= y) {
587
                /* rtsc is below isc, no change to rtsc */
588
                return;
589
        }
590
 
591
        y2 = rtsc_x2y(rtsc, x + isc->dx);
592
        if (y2 >= y + isc->dy) {
593
                /* rtsc is above isc, replace rtsc by isc */
594
                rtsc->x = x;
595
                rtsc->y = y;
596
                rtsc->dx = isc->dx;
597
                rtsc->dy = isc->dy;
598
                return;
599
        }
600
 
        /*
         * the two curves intersect
         * compute the offsets (dx, dy) using the reverse
         * function of seg_x2y()
         *      seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
         */
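        /*
         * solving the above for dx (with seg_x2y(dx, sm) roughly equal
         * to dx * sm >> SM_SHIFT) gives
         *      dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2),
         * which is what is computed below.
         */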
607
        dx = (y1 - y) << SM_SHIFT;
608
        dsm = isc->sm1 - isc->sm2;
609
        do_div(dx, dsm);
610
        /*
611
         * check if (x, y1) belongs to the 1st segment of rtsc.
612
         * if so, add the offset.
613
         */
614
        if (rtsc->x + rtsc->dx > x)
615
                dx += rtsc->x + rtsc->dx - x;
616
        dy = seg_x2y(dx, isc->sm1);
617
 
618
        rtsc->x = x;
619
        rtsc->y = y;
620
        rtsc->dx = dx;
621
        rtsc->dy = dy;
622
        return;
623
}
624
 
625
static void
626
init_ed(struct hfsc_class *cl, unsigned int next_len)
627
{
628
        u64 cur_time = psched_get_time();
629
 
630
        /* update the deadline curve */
631
        rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
632
 
633
        /*
634
         * update the eligible curve.
635
         * for concave, it is equal to the deadline curve.
636
         * for convex, it is a linear curve with slope m2.
637
         */
638
        cl->cl_eligible = cl->cl_deadline;
639
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
640
                cl->cl_eligible.dx = 0;
641
                cl->cl_eligible.dy = 0;
642
        }
643
 
644
        /* compute e and d */
645
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
646
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
647
 
648
        eltree_insert(cl);
649
}
650
 
651
static void
652
update_ed(struct hfsc_class *cl, unsigned int next_len)
653
{
654
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
655
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
656
 
657
        eltree_update(cl);
658
}
659
 
660
static inline void
661
update_d(struct hfsc_class *cl, unsigned int next_len)
662
{
663
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
664
}
665
 
666
static inline void
667
update_cfmin(struct hfsc_class *cl)
668
{
669
        struct rb_node *n = rb_first(&cl->cf_tree);
670
        struct hfsc_class *p;
671
 
672
        if (n == NULL) {
673
                cl->cl_cfmin = 0;
674
                return;
675
        }
676
        p = rb_entry(n, struct hfsc_class, cf_node);
677
        cl->cl_cfmin = p->cl_f;
678
}
679
 
680
static void
681
init_vf(struct hfsc_class *cl, unsigned int len)
682
{
683
        struct hfsc_class *max_cl;
684
        struct rb_node *n;
685
        u64 vt, f, cur_time;
686
        int go_active;
687
 
688
        cur_time = 0;
689
        go_active = 1;
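        /*
         * walk up the hierarchy.  the vt/period initialization below is
         * done only while each ancestor's active-child count goes from
         * 0 to 1; above the first ancestor that already had an active
         * child, only cl_f and the parent's cfmin are refreshed.
         */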
690
        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
691
                if (go_active && cl->cl_nactive++ == 0)
692
                        go_active = 1;
693
                else
694
                        go_active = 0;
695
 
696
                if (go_active) {
697
                        n = rb_last(&cl->cl_parent->vt_tree);
698
                        if (n != NULL) {
699
                                max_cl = rb_entry(n, struct hfsc_class,vt_node);
700
                                /*
701
                                 * set vt to the average of the min and max
702
                                 * classes.  if the parent's period didn't
703
                                 * change, don't decrease vt of the class.
704
                                 */
705
                                vt = max_cl->cl_vt;
706
                                if (cl->cl_parent->cl_cvtmin != 0)
707
                                        vt = (cl->cl_parent->cl_cvtmin + vt)/2;
708
 
709
                                if (cl->cl_parent->cl_vtperiod !=
710
                                    cl->cl_parentperiod || vt > cl->cl_vt)
711
                                        cl->cl_vt = vt;
712
                        } else {
713
                                /*
714
                                 * first child for a new parent backlog period.
715
                                 * add parent's cvtmax to cvtoff to make a new
716
                                 * vt (vtoff + vt) larger than the vt in the
717
                                 * last period for all children.
718
                                 */
719
                                vt = cl->cl_parent->cl_cvtmax;
720
                                cl->cl_parent->cl_cvtoff += vt;
721
                                cl->cl_parent->cl_cvtmax = 0;
722
                                cl->cl_parent->cl_cvtmin = 0;
723
                                cl->cl_vt = 0;
724
                        }
725
 
726
                        cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
727
                                                        cl->cl_pcvtoff;
728
 
729
                        /* update the virtual curve */
730
                        vt = cl->cl_vt + cl->cl_vtoff;
731
                        rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
732
                                                      cl->cl_total);
733
                        if (cl->cl_virtual.x == vt) {
734
                                cl->cl_virtual.x -= cl->cl_vtoff;
735
                                cl->cl_vtoff = 0;
736
                        }
737
                        cl->cl_vtadj = 0;
738
 
739
                        cl->cl_vtperiod++;  /* increment vt period */
740
                        cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
741
                        if (cl->cl_parent->cl_nactive == 0)
742
                                cl->cl_parentperiod++;
743
                        cl->cl_f = 0;
744
 
745
                        vttree_insert(cl);
746
                        cftree_insert(cl);
747
 
748
                        if (cl->cl_flags & HFSC_USC) {
749
                                /* class has upper limit curve */
750
                                if (cur_time == 0)
751
                                        cur_time = psched_get_time();
752
 
753
                                /* update the ulimit curve */
754
                                rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
755
                                         cl->cl_total);
756
                                /* compute myf */
757
                                cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
758
                                                      cl->cl_total);
759
                                cl->cl_myfadj = 0;
760
                        }
761
                }
762
 
763
                f = max(cl->cl_myf, cl->cl_cfmin);
764
                if (f != cl->cl_f) {
765
                        cl->cl_f = f;
766
                        cftree_update(cl);
767
                        update_cfmin(cl->cl_parent);
768
                }
769
        }
770
}
771
 
772
static void
773
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
774
{
775
        u64 f; /* , myf_bound, delta; */
776
        int go_passive = 0;
777
 
778
        if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
779
                go_passive = 1;
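        /*
         * the leaf only "goes passive" if its queue has drained; the
         * state propagates upwards while each ancestor loses its last
         * active child, and stops at the first ancestor that still has
         * other active children.
         */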
780
 
781
        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
782
                cl->cl_total += len;
783
 
784
                if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
785
                        continue;
786
 
787
                if (go_passive && --cl->cl_nactive == 0)
788
                        go_passive = 1;
789
                else
790
                        go_passive = 0;
791
 
792
                if (go_passive) {
793
                        /* no more active child, going passive */
794
 
795
                        /* update cvtmax of the parent class */
796
                        if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
797
                                cl->cl_parent->cl_cvtmax = cl->cl_vt;
798
 
799
                        /* remove this class from the vt tree */
800
                        vttree_remove(cl);
801
 
802
                        cftree_remove(cl);
803
                        update_cfmin(cl->cl_parent);
804
 
805
                        continue;
806
                }
807
 
808
                /*
809
                 * update vt and f
810
                 */
811
                cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
812
                            - cl->cl_vtoff + cl->cl_vtadj;
813
 
814
                /*
815
                 * if vt of the class is smaller than cvtmin,
816
                 * the class was skipped in the past due to non-fit.
817
                 * if so, we need to adjust vtadj.
818
                 */
819
                if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
820
                        cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
821
                        cl->cl_vt = cl->cl_parent->cl_cvtmin;
822
                }
823
 
824
                /* update the vt tree */
825
                vttree_update(cl);
826
 
827
                if (cl->cl_flags & HFSC_USC) {
828
                        cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
829
                                                              cl->cl_total);
830
#if 0
831
                        /*
832
                         * This code causes classes to stay way under their
833
                         * limit when multiple classes are used at gigabit
834
                         * speed. needs investigation. -kaber
835
                         */
836
                        /*
837
                         * if myf lags behind by more than one clock tick
838
                         * from the current time, adjust myfadj to prevent
839
                         * a rate-limited class from going greedy.
840
                         * in a steady state under rate-limiting, myf
841
                         * fluctuates within one clock tick.
842
                         */
843
                        myf_bound = cur_time - PSCHED_JIFFIE2US(1);
844
                        if (cl->cl_myf < myf_bound) {
845
                                delta = cur_time - cl->cl_myf;
846
                                cl->cl_myfadj += delta;
847
                                cl->cl_myf += delta;
848
                        }
849
#endif
850
                }
851
 
852
                f = max(cl->cl_myf, cl->cl_cfmin);
853
                if (f != cl->cl_f) {
854
                        cl->cl_f = f;
855
                        cftree_update(cl);
856
                        update_cfmin(cl->cl_parent);
857
                }
858
        }
859
}
860
 
861
static void
862
set_active(struct hfsc_class *cl, unsigned int len)
863
{
864
        if (cl->cl_flags & HFSC_RSC)
865
                init_ed(cl, len);
866
        if (cl->cl_flags & HFSC_FSC)
867
                init_vf(cl, len);
868
 
869
        list_add_tail(&cl->dlist, &cl->sched->droplist);
870
}
871
 
872
static void
873
set_passive(struct hfsc_class *cl)
874
{
875
        if (cl->cl_flags & HFSC_RSC)
876
                eltree_remove(cl);
877
 
878
        list_del(&cl->dlist);
879
 
880
        /*
881
         * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
882
         * needs to be called explicitly to remove a class from vttree.
883
         */
884
}
885
 
886
/*
887
 * hack to get length of first packet in queue.
888
 */
889
static unsigned int
890
qdisc_peek_len(struct Qdisc *sch)
891
{
892
        struct sk_buff *skb;
893
        unsigned int len;
894
 
895
        skb = sch->dequeue(sch);
896
        if (skb == NULL) {
897
                if (net_ratelimit())
898
                        printk("qdisc_peek_len: non work-conserving qdisc ?\n");
899
                return 0;
900
        }
901
        len = skb->len;
902
        if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
903
                if (net_ratelimit())
904
                        printk("qdisc_peek_len: failed to requeue\n");
905
                qdisc_tree_decrease_qlen(sch, 1);
906
                return 0;
907
        }
908
        return len;
909
}
910
 
911
static void
912
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
913
{
914
        unsigned int len = cl->qdisc->q.qlen;
915
 
916
        qdisc_reset(cl->qdisc);
917
        qdisc_tree_decrease_qlen(cl->qdisc, len);
918
}
919
 
920
static void
921
hfsc_adjust_levels(struct hfsc_class *cl)
922
{
923
        struct hfsc_class *p;
924
        unsigned int level;
925
 
926
        do {
927
                level = 0;
928
                list_for_each_entry(p, &cl->children, siblings) {
929
                        if (p->level >= level)
930
                                level = p->level + 1;
931
                }
932
                cl->level = level;
933
        } while ((cl = cl->cl_parent) != NULL);
934
}
935
 
936
static inline unsigned int
937
hfsc_hash(u32 h)
938
{
939
        h ^= h >> 8;
940
        h ^= h >> 4;
941
 
942
        return h & (HFSC_HSIZE - 1);
943
}
944
 
945
static inline struct hfsc_class *
946
hfsc_find_class(u32 classid, struct Qdisc *sch)
947
{
948
        struct hfsc_sched *q = qdisc_priv(sch);
949
        struct hfsc_class *cl;
950
 
951
        list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
952
                if (cl->classid == classid)
953
                        return cl;
954
        }
955
        return NULL;
956
}
957
 
958
static void
959
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
960
                u64 cur_time)
961
{
962
        sc2isc(rsc, &cl->cl_rsc);
963
        rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
964
        cl->cl_eligible = cl->cl_deadline;
965
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
966
                cl->cl_eligible.dx = 0;
967
                cl->cl_eligible.dy = 0;
968
        }
969
        cl->cl_flags |= HFSC_RSC;
970
}
971
 
972
static void
973
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
974
{
975
        sc2isc(fsc, &cl->cl_fsc);
976
        rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
977
        cl->cl_flags |= HFSC_FSC;
978
}
979
 
980
static void
981
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
982
                u64 cur_time)
983
{
984
        sc2isc(usc, &cl->cl_usc);
985
        rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
986
        cl->cl_flags |= HFSC_USC;
987
}
988
 
989
static int
990
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
991
                  struct rtattr **tca, unsigned long *arg)
992
{
993
        struct hfsc_sched *q = qdisc_priv(sch);
994
        struct hfsc_class *cl = (struct hfsc_class *)*arg;
995
        struct hfsc_class *parent = NULL;
996
        struct rtattr *opt = tca[TCA_OPTIONS-1];
997
        struct rtattr *tb[TCA_HFSC_MAX];
998
        struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
999
        u64 cur_time;
1000
 
1001
        if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
1002
                return -EINVAL;
1003
 
1004
        if (tb[TCA_HFSC_RSC-1]) {
1005
                if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
1006
                        return -EINVAL;
1007
                rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
1008
                if (rsc->m1 == 0 && rsc->m2 == 0)
1009
                        rsc = NULL;
1010
        }
1011
 
1012
        if (tb[TCA_HFSC_FSC-1]) {
1013
                if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
1014
                        return -EINVAL;
1015
                fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
1016
                if (fsc->m1 == 0 && fsc->m2 == 0)
1017
                        fsc = NULL;
1018
        }
1019
 
1020
        if (tb[TCA_HFSC_USC-1]) {
1021
                if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
1022
                        return -EINVAL;
1023
                usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
1024
                if (usc->m1 == 0 && usc->m2 == 0)
1025
                        usc = NULL;
1026
        }
1027
 
1028
        if (cl != NULL) {
1029
                if (parentid) {
1030
                        if (cl->cl_parent && cl->cl_parent->classid != parentid)
1031
                                return -EINVAL;
1032
                        if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
1033
                                return -EINVAL;
1034
                }
1035
                cur_time = psched_get_time();
1036
 
1037
                sch_tree_lock(sch);
1038
                if (rsc != NULL)
1039
                        hfsc_change_rsc(cl, rsc, cur_time);
1040
                if (fsc != NULL)
1041
                        hfsc_change_fsc(cl, fsc);
1042
                if (usc != NULL)
1043
                        hfsc_change_usc(cl, usc, cur_time);
1044
 
1045
                if (cl->qdisc->q.qlen != 0) {
1046
                        if (cl->cl_flags & HFSC_RSC)
1047
                                update_ed(cl, qdisc_peek_len(cl->qdisc));
1048
                        if (cl->cl_flags & HFSC_FSC)
1049
                                update_vf(cl, 0, cur_time);
1050
                }
1051
                sch_tree_unlock(sch);
1052
 
1053
                if (tca[TCA_RATE-1])
1054
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
1055
                                              &sch->dev->queue_lock,
1056
                                              tca[TCA_RATE-1]);
1057
                return 0;
1058
        }
1059
 
1060
        if (parentid == TC_H_ROOT)
1061
                return -EEXIST;
1062
 
1063
        parent = &q->root;
1064
        if (parentid) {
1065
                parent = hfsc_find_class(parentid, sch);
1066
                if (parent == NULL)
1067
                        return -ENOENT;
1068
        }
1069
 
1070
        if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
1071
                return -EINVAL;
1072
        if (hfsc_find_class(classid, sch))
1073
                return -EEXIST;
1074
 
1075
        if (rsc == NULL && fsc == NULL)
1076
                return -EINVAL;
1077
 
1078
        cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
1079
        if (cl == NULL)
1080
                return -ENOBUFS;
1081
 
1082
        if (rsc != NULL)
1083
                hfsc_change_rsc(cl, rsc, 0);
1084
        if (fsc != NULL)
1085
                hfsc_change_fsc(cl, fsc);
1086
        if (usc != NULL)
1087
                hfsc_change_usc(cl, usc, 0);
1088
 
1089
        cl->refcnt    = 1;
1090
        cl->classid   = classid;
1091
        cl->sched     = q;
1092
        cl->cl_parent = parent;
1093
        cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
1094
        if (cl->qdisc == NULL)
1095
                cl->qdisc = &noop_qdisc;
1096
        INIT_LIST_HEAD(&cl->children);
1097
        cl->vt_tree = RB_ROOT;
1098
        cl->cf_tree = RB_ROOT;
1099
 
1100
        sch_tree_lock(sch);
1101
        list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
1102
        list_add_tail(&cl->siblings, &parent->children);
1103
        if (parent->level == 0)
1104
                hfsc_purge_queue(sch, parent);
1105
        hfsc_adjust_levels(parent);
1106
        cl->cl_pcvtoff = parent->cl_cvtoff;
1107
        sch_tree_unlock(sch);
1108
 
1109
        if (tca[TCA_RATE-1])
1110
                gen_new_estimator(&cl->bstats, &cl->rate_est,
1111
                                  &sch->dev->queue_lock, tca[TCA_RATE-1]);
1112
        *arg = (unsigned long)cl;
1113
        return 0;
1114
}
1115
 
1116
static void
1117
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
1118
{
1119
        struct hfsc_sched *q = qdisc_priv(sch);
1120
 
1121
        tcf_destroy_chain(cl->filter_list);
1122
        qdisc_destroy(cl->qdisc);
1123
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
1124
        if (cl != &q->root)
1125
                kfree(cl);
1126
}
1127
 
1128
static int
1129
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
1130
{
1131
        struct hfsc_sched *q = qdisc_priv(sch);
1132
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1133
 
1134
        if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
1135
                return -EBUSY;
1136
 
1137
        sch_tree_lock(sch);
1138
 
1139
        list_del(&cl->siblings);
1140
        hfsc_adjust_levels(cl->cl_parent);
1141
 
1142
        hfsc_purge_queue(sch, cl);
1143
        list_del(&cl->hlist);
1144
 
1145
        if (--cl->refcnt == 0)
1146
                hfsc_destroy_class(sch, cl);
1147
 
1148
        sch_tree_unlock(sch);
1149
        return 0;
1150
}
1151
 
1152
static struct hfsc_class *
1153
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
1154
{
1155
        struct hfsc_sched *q = qdisc_priv(sch);
1156
        struct hfsc_class *cl;
1157
        struct tcf_result res;
1158
        struct tcf_proto *tcf;
1159
        int result;
1160
 
1161
        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
1162
            (cl = hfsc_find_class(skb->priority, sch)) != NULL)
1163
                if (cl->level == 0)
1164
                        return cl;
1165
 
1166
        *qerr = NET_XMIT_BYPASS;
1167
        tcf = q->root.filter_list;
1168
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
1169
#ifdef CONFIG_NET_CLS_ACT
1170
                switch (result) {
1171
                case TC_ACT_QUEUED:
1172
                case TC_ACT_STOLEN:
1173
                        *qerr = NET_XMIT_SUCCESS;
1174
                case TC_ACT_SHOT:
1175
                        return NULL;
1176
                }
1177
#endif
1178
                if ((cl = (struct hfsc_class *)res.class) == NULL) {
1179
                        if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
1180
                                break; /* filter selected invalid classid */
1181
                }
1182
 
1183
                if (cl->level == 0)
1184
                        return cl; /* hit leaf class */
1185
 
1186
                /* apply inner filter chain */
1187
                tcf = cl->filter_list;
1188
        }
1189
 
1190
        /* classification failed, try default class */
1191
        cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
1192
        if (cl == NULL || cl->level > 0)
1193
                return NULL;
1194
 
1195
        return cl;
1196
}
1197
 
1198
static int
1199
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1200
                 struct Qdisc **old)
1201
{
1202
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1203
 
1204
        if (cl == NULL)
1205
                return -ENOENT;
1206
        if (cl->level > 0)
1207
                return -EINVAL;
1208
        if (new == NULL) {
1209
                new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
1210
                                        cl->classid);
1211
                if (new == NULL)
1212
                        new = &noop_qdisc;
1213
        }
1214
 
1215
        sch_tree_lock(sch);
1216
        hfsc_purge_queue(sch, cl);
1217
        *old = xchg(&cl->qdisc, new);
1218
        sch_tree_unlock(sch);
1219
        return 0;
1220
}
1221
 
1222
static struct Qdisc *
1223
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
1224
{
1225
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1226
 
1227
        if (cl != NULL && cl->level == 0)
1228
                return cl->qdisc;
1229
 
1230
        return NULL;
1231
}
1232
 
1233
static void
1234
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
1235
{
1236
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1237
 
1238
        if (cl->qdisc->q.qlen == 0) {
1239
                update_vf(cl, 0, 0);
1240
                set_passive(cl);
1241
        }
1242
}
1243
 
1244
static unsigned long
1245
hfsc_get_class(struct Qdisc *sch, u32 classid)
1246
{
1247
        struct hfsc_class *cl = hfsc_find_class(classid, sch);
1248
 
1249
        if (cl != NULL)
1250
                cl->refcnt++;
1251
 
1252
        return (unsigned long)cl;
1253
}
1254
 
1255
static void
1256
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
1257
{
1258
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1259
 
1260
        if (--cl->refcnt == 0)
1261
                hfsc_destroy_class(sch, cl);
1262
}
1263
 
1264
static unsigned long
1265
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
1266
{
1267
        struct hfsc_class *p = (struct hfsc_class *)parent;
1268
        struct hfsc_class *cl = hfsc_find_class(classid, sch);
1269
 
1270
        if (cl != NULL) {
1271
                if (p != NULL && p->level <= cl->level)
1272
                        return 0;
1273
                cl->filter_cnt++;
1274
        }
1275
 
1276
        return (unsigned long)cl;
1277
}
1278
 
1279
static void
1280
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
1281
{
1282
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1283
 
1284
        cl->filter_cnt--;
1285
}
1286
 
1287
static struct tcf_proto **
1288
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
1289
{
1290
        struct hfsc_sched *q = qdisc_priv(sch);
1291
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1292
 
1293
        if (cl == NULL)
1294
                cl = &q->root;
1295
 
1296
        return &cl->filter_list;
1297
}
1298
 
1299
static int
1300
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
1301
{
1302
        struct tc_service_curve tsc;
1303
 
1304
        tsc.m1 = sm2m(sc->sm1);
1305
        tsc.d  = dx2d(sc->dx);
1306
        tsc.m2 = sm2m(sc->sm2);
1307
        RTA_PUT(skb, attr, sizeof(tsc), &tsc);
1308
 
1309
        return skb->len;
1310
 
1311
 rtattr_failure:
1312
        return -1;
1313
}
1314
 
1315
static inline int
1316
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
1317
{
1318
        if ((cl->cl_flags & HFSC_RSC) &&
1319
            (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
1320
                goto rtattr_failure;
1321
 
1322
        if ((cl->cl_flags & HFSC_FSC) &&
1323
            (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
1324
                goto rtattr_failure;
1325
 
1326
        if ((cl->cl_flags & HFSC_USC) &&
1327
            (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
1328
                goto rtattr_failure;
1329
 
1330
        return skb->len;
1331
 
1332
 rtattr_failure:
1333
        return -1;
1334
}
1335
 
1336
static int
1337
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
1338
                struct tcmsg *tcm)
1339
{
1340
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1341
        unsigned char *b = skb_tail_pointer(skb);
1342
        struct rtattr *rta = (struct rtattr *)b;
1343
 
1344
        tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
1345
        tcm->tcm_handle = cl->classid;
1346
        if (cl->level == 0)
1347
                tcm->tcm_info = cl->qdisc->handle;
1348
 
1349
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
1350
        if (hfsc_dump_curves(skb, cl) < 0)
1351
                goto rtattr_failure;
1352
        rta->rta_len = skb_tail_pointer(skb) - b;
1353
        return skb->len;
1354
 
1355
 rtattr_failure:
1356
        nlmsg_trim(skb, b);
1357
        return -1;
1358
}
1359
 
1360
static int
1361
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1362
        struct gnet_dump *d)
1363
{
1364
        struct hfsc_class *cl = (struct hfsc_class *)arg;
1365
        struct tc_hfsc_stats xstats;
1366
 
1367
        cl->qstats.qlen = cl->qdisc->q.qlen;
1368
        xstats.level   = cl->level;
1369
        xstats.period  = cl->cl_vtperiod;
1370
        xstats.work    = cl->cl_total;
1371
        xstats.rtwork  = cl->cl_cumul;
1372
 
1373
        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
1374
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1375
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
1376
                return -1;
1377
 
1378
        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
1379
}
1380
 
1381
 
1382
 
1383
static void
1384
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1385
{
1386
        struct hfsc_sched *q = qdisc_priv(sch);
1387
        struct hfsc_class *cl;
1388
        unsigned int i;
1389
 
1390
        if (arg->stop)
1391
                return;
1392
 
1393
        for (i = 0; i < HFSC_HSIZE; i++) {
1394
                list_for_each_entry(cl, &q->clhash[i], hlist) {
1395
                        if (arg->count < arg->skip) {
1396
                                arg->count++;
1397
                                continue;
1398
                        }
1399
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1400
                                arg->stop = 1;
1401
                                return;
1402
                        }
1403
                        arg->count++;
1404
                }
1405
        }
1406
}
1407
 
1408
static void
1409
hfsc_schedule_watchdog(struct Qdisc *sch)
1410
{
1411
        struct hfsc_sched *q = qdisc_priv(sch);
1412
        struct hfsc_class *cl;
1413
        u64 next_time = 0;
1414
 
1415
        if ((cl = eltree_get_minel(q)) != NULL)
1416
                next_time = cl->cl_e;
1417
        if (q->root.cl_cfmin != 0) {
1418
                if (next_time == 0 || next_time > q->root.cl_cfmin)
1419
                        next_time = q->root.cl_cfmin;
1420
        }
1421
        WARN_ON(next_time == 0);
1422
        qdisc_watchdog_schedule(&q->watchdog, next_time);
1423
}
1424
 
1425
static int
1426
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
1427
{
1428
        struct hfsc_sched *q = qdisc_priv(sch);
1429
        struct tc_hfsc_qopt *qopt;
1430
        unsigned int i;
1431
 
1432
        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
1433
                return -EINVAL;
1434
        qopt = RTA_DATA(opt);
1435
 
1436
        q->defcls = qopt->defcls;
1437
        for (i = 0; i < HFSC_HSIZE; i++)
1438
                INIT_LIST_HEAD(&q->clhash[i]);
1439
        q->eligible = RB_ROOT;
1440
        INIT_LIST_HEAD(&q->droplist);
1441
        skb_queue_head_init(&q->requeue);
1442
 
1443
        q->root.refcnt  = 1;
1444
        q->root.classid = sch->handle;
1445
        q->root.sched   = q;
1446
        q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
1447
                                          sch->handle);
1448
        if (q->root.qdisc == NULL)
1449
                q->root.qdisc = &noop_qdisc;
1450
        INIT_LIST_HEAD(&q->root.children);
1451
        q->root.vt_tree = RB_ROOT;
1452
        q->root.cf_tree = RB_ROOT;
1453
 
1454
        list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);
1455
 
1456
        qdisc_watchdog_init(&q->watchdog, sch);
1457
 
1458
        return 0;
1459
}
1460
 
1461
static int
1462
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
1463
{
1464
        struct hfsc_sched *q = qdisc_priv(sch);
1465
        struct tc_hfsc_qopt *qopt;
1466
 
1467
        if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
1468
                return -EINVAL;
1469
        qopt = RTA_DATA(opt);
1470
 
1471
        sch_tree_lock(sch);
1472
        q->defcls = qopt->defcls;
1473
        sch_tree_unlock(sch);
1474
 
1475
        return 0;
1476
}
1477
 
1478
static void
1479
hfsc_reset_class(struct hfsc_class *cl)
1480
{
1481
        cl->cl_total        = 0;
1482
        cl->cl_cumul        = 0;
1483
        cl->cl_d            = 0;
1484
        cl->cl_e            = 0;
1485
        cl->cl_vt           = 0;
1486
        cl->cl_vtadj        = 0;
1487
        cl->cl_vtoff        = 0;
1488
        cl->cl_cvtmin       = 0;
1489
        cl->cl_cvtmax       = 0;
1490
        cl->cl_cvtoff       = 0;
1491
        cl->cl_pcvtoff      = 0;
1492
        cl->cl_vtperiod     = 0;
1493
        cl->cl_parentperiod = 0;
1494
        cl->cl_f            = 0;
1495
        cl->cl_myf          = 0;
1496
        cl->cl_myfadj       = 0;
1497
        cl->cl_cfmin        = 0;
1498
        cl->cl_nactive      = 0;
1499
 
1500
        cl->vt_tree = RB_ROOT;
1501
        cl->cf_tree = RB_ROOT;
1502
        qdisc_reset(cl->qdisc);
1503
 
1504
        if (cl->cl_flags & HFSC_RSC)
1505
                rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
1506
        if (cl->cl_flags & HFSC_FSC)
1507
                rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
1508
        if (cl->cl_flags & HFSC_USC)
1509
                rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
1510
}
1511
 
1512
static void
1513
hfsc_reset_qdisc(struct Qdisc *sch)
1514
{
1515
        struct hfsc_sched *q = qdisc_priv(sch);
1516
        struct hfsc_class *cl;
1517
        unsigned int i;
1518
 
1519
        for (i = 0; i < HFSC_HSIZE; i++) {
1520
                list_for_each_entry(cl, &q->clhash[i], hlist)
1521
                        hfsc_reset_class(cl);
1522
        }
1523
        __skb_queue_purge(&q->requeue);
1524
        q->eligible = RB_ROOT;
1525
        INIT_LIST_HEAD(&q->droplist);
1526
        qdisc_watchdog_cancel(&q->watchdog);
1527
        sch->q.qlen = 0;
1528
}
1529
 
1530
static void
1531
hfsc_destroy_qdisc(struct Qdisc *sch)
1532
{
1533
        struct hfsc_sched *q = qdisc_priv(sch);
1534
        struct hfsc_class *cl, *next;
1535
        unsigned int i;
1536
 
1537
        for (i = 0; i < HFSC_HSIZE; i++) {
1538
                list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
1539
                        hfsc_destroy_class(sch, cl);
1540
        }
1541
        __skb_queue_purge(&q->requeue);
1542
        qdisc_watchdog_cancel(&q->watchdog);
1543
}
1544
 
1545
static int
1546
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1547
{
1548
        struct hfsc_sched *q = qdisc_priv(sch);
1549
        unsigned char *b = skb_tail_pointer(skb);
1550
        struct tc_hfsc_qopt qopt;
1551
 
1552
        qopt.defcls = q->defcls;
1553
        RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
1554
        return skb->len;
1555
 
1556
 rtattr_failure:
1557
        nlmsg_trim(skb, b);
1558
        return -1;
1559
}
1560
 
1561
static int
1562
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1563
{
1564
        struct hfsc_class *cl;
1565
        unsigned int len;
1566
        int err;
1567
 
1568
        cl = hfsc_classify(skb, sch, &err);
1569
        if (cl == NULL) {
1570
                if (err == NET_XMIT_BYPASS)
1571
                        sch->qstats.drops++;
1572
                kfree_skb(skb);
1573
                return err;
1574
        }
1575
 
1576
        len = skb->len;
1577
        err = cl->qdisc->enqueue(skb, cl->qdisc);
1578
        if (unlikely(err != NET_XMIT_SUCCESS)) {
1579
                cl->qstats.drops++;
1580
                sch->qstats.drops++;
1581
                return err;
1582
        }
1583
 
1584
        if (cl->qdisc->q.qlen == 1)
1585
                set_active(cl, len);
1586
 
1587
        cl->bstats.packets++;
1588
        cl->bstats.bytes += len;
1589
        sch->bstats.packets++;
1590
        sch->bstats.bytes += len;
1591
        sch->q.qlen++;
1592
 
1593
        return NET_XMIT_SUCCESS;
1594
}
1595
 
1596
static struct sk_buff *
1597
hfsc_dequeue(struct Qdisc *sch)
1598
{
1599
        struct hfsc_sched *q = qdisc_priv(sch);
1600
        struct hfsc_class *cl;
1601
        struct sk_buff *skb;
1602
        u64 cur_time;
1603
        unsigned int next_len;
1604
        int realtime = 0;
1605
 
1606
        if (sch->q.qlen == 0)
1607
                return NULL;
1608
        if ((skb = __skb_dequeue(&q->requeue)))
1609
                goto out;
1610
 
1611
        cur_time = psched_get_time();
1612
 
1613
        /*
1614
         * if there are eligible classes, use real-time criteria.
1615
         * find the class with the minimum deadline among
1616
         * the eligible classes.
1617
         */
1618
        if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
1619
                realtime = 1;
1620
        } else {
1621
                /*
1622
                 * use link-sharing criteria
1623
                 * get the class with the minimum vt in the hierarchy
1624
                 */
1625
                cl = vttree_get_minvt(&q->root, cur_time);
1626
                if (cl == NULL) {
1627
                        sch->qstats.overlimits++;
1628
                        hfsc_schedule_watchdog(sch);
1629
                        return NULL;
1630
                }
1631
        }
1632
 
1633
        skb = cl->qdisc->dequeue(cl->qdisc);
1634
        if (skb == NULL) {
1635
                if (net_ratelimit())
1636
                        printk("HFSC: Non-work-conserving qdisc ?\n");
1637
                return NULL;
1638
        }
1639
 
1640
        update_vf(cl, skb->len, cur_time);
1641
        if (realtime)
1642
                cl->cl_cumul += skb->len;
1643
 
1644
        if (cl->qdisc->q.qlen != 0) {
1645
                if (cl->cl_flags & HFSC_RSC) {
1646
                        /* update ed */
1647
                        next_len = qdisc_peek_len(cl->qdisc);
1648
                        if (realtime)
1649
                                update_ed(cl, next_len);
1650
                        else
1651
                                update_d(cl, next_len);
1652
                }
1653
        } else {
1654
                /* the class becomes passive */
1655
                set_passive(cl);
1656
        }
1657
 
1658
 out:
1659
        sch->flags &= ~TCQ_F_THROTTLED;
1660
        sch->q.qlen--;
1661
 
1662
        return skb;
1663
}
1664
 
1665
static int
1666
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
1667
{
1668
        struct hfsc_sched *q = qdisc_priv(sch);
1669
 
1670
        __skb_queue_head(&q->requeue, skb);
1671
        sch->q.qlen++;
1672
        sch->qstats.requeues++;
1673
        return NET_XMIT_SUCCESS;
1674
}
1675
 
1676
static unsigned int
1677
hfsc_drop(struct Qdisc *sch)
1678
{
1679
        struct hfsc_sched *q = qdisc_priv(sch);
1680
        struct hfsc_class *cl;
1681
        unsigned int len;
1682
 
1683
        list_for_each_entry(cl, &q->droplist, dlist) {
1684
                if (cl->qdisc->ops->drop != NULL &&
1685
                    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
1686
                        if (cl->qdisc->q.qlen == 0) {
1687
                                update_vf(cl, 0, 0);
1688
                                set_passive(cl);
1689
                        } else {
1690
                                list_move_tail(&cl->dlist, &q->droplist);
1691
                        }
1692
                        cl->qstats.drops++;
1693
                        sch->qstats.drops++;
1694
                        sch->q.qlen--;
1695
                        return len;
1696
                }
1697
        }
1698
        return 0;
1699
}
1700
 
1701
static struct Qdisc_class_ops hfsc_class_ops = {
1702
        .change         = hfsc_change_class,
1703
        .delete         = hfsc_delete_class,
1704
        .graft          = hfsc_graft_class,
1705
        .leaf           = hfsc_class_leaf,
1706
        .qlen_notify    = hfsc_qlen_notify,
1707
        .get            = hfsc_get_class,
1708
        .put            = hfsc_put_class,
1709
        .bind_tcf       = hfsc_bind_tcf,
1710
        .unbind_tcf     = hfsc_unbind_tcf,
1711
        .tcf_chain      = hfsc_tcf_chain,
1712
        .dump           = hfsc_dump_class,
1713
        .dump_stats     = hfsc_dump_class_stats,
1714
        .walk           = hfsc_walk
1715
};
1716
 
1717
static struct Qdisc_ops hfsc_qdisc_ops = {
1718
        .id             = "hfsc",
1719
        .init           = hfsc_init_qdisc,
1720
        .change         = hfsc_change_qdisc,
1721
        .reset          = hfsc_reset_qdisc,
1722
        .destroy        = hfsc_destroy_qdisc,
1723
        .dump           = hfsc_dump_qdisc,
1724
        .enqueue        = hfsc_enqueue,
1725
        .dequeue        = hfsc_dequeue,
1726
        .requeue        = hfsc_requeue,
1727
        .drop           = hfsc_drop,
1728
        .cl_ops         = &hfsc_class_ops,
1729
        .priv_size      = sizeof(struct hfsc_sched),
1730
        .owner          = THIS_MODULE
1731
};
1732
 
1733
static int __init
1734
hfsc_init(void)
1735
{
1736
        return register_qdisc(&hfsc_qdisc_ops);
1737
}
1738
 
1739
static void __exit
1740
hfsc_cleanup(void)
1741
{
1742
        unregister_qdisc(&hfsc_qdisc_ops);
1743
}
1744
 
MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
