or1k/trunk/rc203soc/sw/uClinux/drivers/net/shaper.c (rev 1765)
/*
 *                      Simple traffic shaper for Linux NET3.
 *
 *      (c) Copyright 1996 Alan Cox <alan@cymru.net>, All Rights Reserved.
 *                              http://www.cymru.net
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 *      warranty for any of this software. This material is provided
 *      "AS-IS" and at no charge.
 *
 *
 *      Algorithm:
 *
 *      Queue Frame:
 *              Compute the time length of the frame at the regulated speed
 *              Add the frame to the queue at the appropriate point
 *              Adjust the time length computation for follow-up frames
 *              Any frame that falls outside its boundaries is freed
 *
 *      We work to the following constants:
 *
 *              SHAPER_QLEN     Maximum queued frames
 *              SHAPER_LATENCY  Bounding latency on a frame. Leaving this
 *                              latency window drops the frame. This stops us
 *                              queueing frames for a long time and confusing
 *                              a remote host.
 *              SHAPER_MAXSLIP  Maximum time a priority frame may jump forward.
 *                              That bounds the penalty we will inflict on low
 *                              priority traffic.
 *              SHAPER_BURST    Time range we call "now" in order to reduce
 *                              system load. The larger we make this, the
 *                              burstier the behaviour; local performance
 *                              improves through packet clustering on routers,
 *                              but the remote end finds it harder to judge
 *                              round trip times.
 *
 *      This is designed to handle lower speed links (< 200Kbit/second or so).
 *      We typically run off a 100-150Hz base clock. This gives us a
 *      resolution at 200Kbit/second of about 2Kbit or 256 bytes. Above that,
 *      our timer resolution may start to cause much more burstiness in the
 *      traffic. We could avoid a lot of that by calling kick_shaper() at the
 *      end of the tied device's transmissions. If you run above about
 *      100Kbit/second you may need to tune the supposed speed rate for the
 *      right values.
 *
 *      BUGS:
 *              Downing the interface underneath the shaper before the shaper
 *              itself will render your machine defunct. So for now, don't
 *              shape over PPP or SLIP!
 *              This will be fixed in BETA4
 */
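
/*
 *      Worked example of the resolution figures above, assuming HZ=100:
 *      at 200Kbit/s shaper_setspeed() gives
 *      bytespertick = (200000/100)/8 = 250 bytes, so shaper_clocks()
 *      charges a 1500 byte frame 1500/250 = 6 ticks, while anything
 *      shorter than 250 bytes rounds down to 0 ticks - the roughly
 *      2Kbit / 256 byte granularity quoted above.
 */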
 
 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_shaper.h>
 
int sh_debug;           /* Debug flag */
 
#define SHAPER_BANNER   "Traffic Shaper 0.05 for Linux 2.0 <alan@redhat.com>\n"
 
/*
 *      Locking
 */
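
/*
 *      shaper_lock() returns 0 if it is called at interrupt time while the
 *      shaper is already held (a lock in an interrupt may fail); otherwise
 *      it sleeps until the holder releases the shaper and returns 1 with
 *      the lock taken. shaper_unlock() drops the lock, wakes any sleepers
 *      and kicks the send queue.
 */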
 
static int shaper_lock(struct shaper *sh)
{
        unsigned long flags;
        save_flags(flags);
        cli();
        /*
         *      A lock taken at interrupt time may fail
         */
        if(sh->locked && intr_count)
        {
                restore_flags(flags);
                return 0;
        }
        while(sh->locked)
                sleep_on(&sh->wait_queue);
        sh->locked=1;
        restore_flags(flags);
        return 1;
}
 
static void shaper_kick(struct shaper *sh);
 
static void shaper_unlock(struct shaper *sh)
{
        sh->locked=0;
        wake_up(&sh->wait_queue);
        shaper_kick(sh);
}
 
/*
 *      Compute the number of clock ticks needed to send a buffer
 */
 
static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
{
        int t=skb->len/shaper->bytespertick;
        return t;
}
 
/*
 *      Set the speed of a shaper. We compute this in bytes per tick since
 *      that's how the machine wants to run. Quoted input is in bits per
 *      second as is traditional (note: not baud). We assume 8 bit bytes.
 */
 
static void shaper_setspeed(struct shaper *shaper, int bitspersec)
{
        shaper->bitspersec=bitspersec;
        shaper->bytespertick=(bitspersec/HZ)/8;
        if(!shaper->bytespertick)
                shaper->bytespertick++;
}
 
/*
 *      Throw a frame at a shaper.
 */
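
/*
 *      shaper_qframe() stamps the frame with its transmit clock: the clock
 *      starts at the later of the recovery time and the current jiffies,
 *      then (in the simple build below) is pushed back by the shapelen of
 *      every frame already queued. A frame whose clock would exceed
 *      SHAPER_LATENCY from now is dropped rather than queued, and if the
 *      queue grows beyond SHAPER_QLEN the oldest frame is dropped.
 */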
 
static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
{
        struct sk_buff *ptr;
 
        /*
         *      Get ready to work on this shaper. The lock may fail if it's
         *      an interrupt and the shaper is already locked.
         */
 
        if(!shaper_lock(shaper))
                return -1;
        ptr=shaper->sendq.prev;
 
        /*
         *      Set up our packet details
         */
 
        skb->shapelatency=0;
        skb->shapeclock=shaper->recovery;
        if(skb->shapeclock<jiffies)
                skb->shapeclock=jiffies;
        skb->shapestamp=jiffies;
 
        /*
         *      Time slots for this packet.
         */
 
        skb->shapelen= shaper_clocks(shaper,skb);
 
#ifdef SHAPER_COMPLEX /* and broken.. */
 
        while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
        {
                if(ptr->pri<skb->pri
                        && jiffies - ptr->shapeclock < SHAPER_MAXSLIP)
                {
                        struct sk_buff *tmp=ptr->prev;
 
                        /*
                         *      It goes before us, therefore we slip by the
                         *      length of the new frame.
                         */
 
                        ptr->shapeclock+=skb->shapelen;
                        ptr->shapelatency+=skb->shapelen;
 
                        /*
                         *      The packet may have slipped so far back that
                         *      it fell outside its latency window.
                         */
                        if(ptr->shapelatency > SHAPER_LATENCY)
                        {
                                skb_unlink(ptr);
                                dev_kfree_skb(ptr, FREE_WRITE);
                        }
                        ptr=tmp;
                }
                else
                        break;
        }
        if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
                skb_queue_head(&shaper->sendq,skb);
        else
        {
                struct sk_buff *tmp;
                /*
                 *      Set the packet clock-out time according to the
                 *      frames ahead. I'm sure a bit of thought could drop
                 *      this loop.
                 */
                for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
                        skb->shapeclock+=tmp->shapelen;
                skb_append(ptr,skb);
        }
#else
        {
                struct sk_buff *tmp;
                /*
                 *      Bump our shape clock by the time pending on the queue
                 *      (should keep this in the shaper as a variable..)
                 */
                for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
                        tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
                        skb->shapeclock+=tmp->shapelen;
                /*
                 *      Queued over time. Spill the packet.
                 */
                if(skb->shapeclock-jiffies > SHAPER_LATENCY)
                        dev_kfree_skb(skb, FREE_WRITE);
                else
                        skb_queue_tail(&shaper->sendq, skb);
        }
#endif
        if(sh_debug)
                printk("Frame queued.\n");
        if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
        {
                ptr=skb_dequeue(&shaper->sendq);
                dev_kfree_skb(ptr, FREE_WRITE);
        }
        shaper_unlock(shaper);
        shaper_kick(shaper);
        return 0;
}
 
/*
 *      Transmit from a shaper
 */
 
static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
{
        struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
        if(sh_debug)
                printk("Kick frame on %p\n",newskb);
        if(newskb)
        {
                newskb->dev=shaper->dev;
                newskb->arp=1;
                if(sh_debug)
                        printk("Kick new frame to %s\n",
                                shaper->dev->name);
                dev_queue_xmit(newskb,shaper->dev,2);
                if(sh_debug)
                        printk("Kicked new frame out.\n");
                dev_kfree_skb(skb, FREE_WRITE);
        }
}
 
/*
 *      Timer handler for shaping clock
 */
 
static void shaper_timer(unsigned long data)
{
        struct shaper *sh=(struct shaper *)data;
        shaper_kick(sh);
}
 
/*
 *      Kick a shaper queue and try and do something sensible with the
 *      queue.
 */
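
/*
 *      In outline: if the shaper is locked the kick is retried one tick
 *      later; otherwise every frame at the head of the send queue whose
 *      shapeclock falls within SHAPER_BURST of the current jiffies is
 *      unlinked, the recovery time is advanced past it, and the frame is
 *      handed to shaper_queue_xmit(). The timer is then re-armed for the
 *      shapeclock of the first frame still waiting.
 */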
 
static void shaper_kick(struct shaper *shaper)
{
        struct sk_buff *skb;
        unsigned long flags;
 
        save_flags(flags);
        cli();
 
        del_timer(&shaper->timer);
 
        /*
         *      Shaper unlock will kick
         */
 
        if(shaper->locked)
        {
                if(sh_debug)
                        printk("Shaper locked.\n");
                shaper->timer.expires=jiffies+1;
                add_timer(&shaper->timer);
                restore_flags(flags);
                return;
        }
 
 
        /*
         *      Walk the list (may be empty)
         */
 
        while((skb=skb_peek(&shaper->sendq))!=NULL)
        {
                /*
                 *      Each packet due to go out by now (within an error
                 *      of SHAPER_BURST) gets kicked onto the link
                 */
 
                if(sh_debug)
                        printk("Clock = %d, jiffies = %ld\n", skb->shapeclock, jiffies);
                if(skb->shapeclock <= jiffies + SHAPER_BURST)
                {
                        /*
                         *      Pull the frame and get interrupts back on.
                         */
 
                        skb_unlink(skb);
                        if (shaper->recovery < skb->shapeclock + skb->shapelen)
                                shaper->recovery = skb->shapeclock + skb->shapelen;
                        restore_flags(flags);
 
                        /*
                         *      Pass on to the physical target device via
                         *      our low level packet thrower.
                         */
 
                        skb->shapepend=0;
                        shaper_queue_xmit(shaper, skb); /* Fire */
                        cli();
                }
                else
                        break;
        }
 
        /*
         *      Next kick.
         */
 
        if(skb!=NULL)
        {
                del_timer(&shaper->timer);
                shaper->timer.expires=skb->shapeclock;
                add_timer(&shaper->timer);
        }
 
        /*
         *      Interrupts on, mission complete
         */
 
        restore_flags(flags);
}
 
 
/*
 *      Flush the shaper queues on a closedown
 */
 
static void shaper_flush(struct shaper *shaper)
{
        struct sk_buff *skb;
        while((skb=skb_dequeue(&shaper->sendq))!=NULL)
                dev_kfree_skb(skb, FREE_WRITE);
}
 
/*
 *      Bring the interface up. We just disallow this until a
 *      bind.
 */
 
static int shaper_open(struct device *dev)
{
        struct shaper *shaper=dev->priv;
 
        /*
         *      Can't open until attached.
         *      Also can't open until the speed is set, or we'll get
         *      a division by zero.
         */
 
        if(shaper->dev==NULL)
                return -ENODEV;
        if(shaper->bitspersec==0)
                return -EINVAL;
        MOD_INC_USE_COUNT;
        return 0;
}
 
/*
 *      Closing a shaper flushes the queues.
 */
 
static int shaper_close(struct device *dev)
{
        struct shaper *shaper=dev->priv;
        shaper_flush(shaper);
        del_timer(&shaper->timer);
        MOD_DEC_USE_COUNT;
        return 0;
}
 
/*
 *      Revectored calls. We alter the parameters and call the functions
 *      for our attached device. This enables us to allocate bandwidth after
 *      ARP and other resolutions, not before.
 */
 
 
static int shaper_start_xmit(struct sk_buff *skb, struct device *dev)
{
        struct shaper *sh=dev->priv;
        return shaper_qframe(sh, skb);
}
 
static struct enet_statistics *shaper_get_stats(struct device *dev)
{
        return NULL;
}
 
static int shaper_header(struct sk_buff *skb, struct device *dev,
        unsigned short type, void *daddr, void *saddr, unsigned len)
{
        struct shaper *sh=dev->priv;
        if(sh_debug)
                printk("Shaper header\n");
        return sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
}
 
static int shaper_rebuild_header(void *eth, struct device *dev, unsigned long raddr, struct sk_buff *skb)
{
        struct shaper *sh=dev->priv;
        if(sh_debug)
                printk("Shaper rebuild header\n");
        return sh->rebuild_header(eth,sh->dev,raddr,skb);
}
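
/*
 *      Attach the shaper to a real device: remember the target device and
 *      its transmit, statistics and header-building entry points, and copy
 *      its link level parameters (header length, type, address length, MTU)
 *      so the shaper presents the same interface. The speed is reset to
 *      zero, so it must be set before the shaper can be opened.
 */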
static int shaper_attach(struct device *shdev, struct shaper *sh, struct device *dev)
{
        sh->dev = dev;
        sh->hard_start_xmit=dev->hard_start_xmit;
        sh->get_stats=dev->get_stats;
        if(dev->hard_header)
        {
                sh->hard_header=dev->hard_header;
                shdev->hard_header = shaper_header;
        }
        else
                shdev->hard_header = NULL;
 
        if(dev->rebuild_header)
        {
                sh->rebuild_header      = dev->rebuild_header;
                shdev->rebuild_header   = shaper_rebuild_header;
        }
        else
                shdev->rebuild_header   = NULL;
 
        shdev->hard_header_len=dev->hard_header_len;
        shdev->type=dev->type;
        shdev->addr_len=dev->addr_len;
        shdev->mtu=dev->mtu;
        sh->bitspersec=0;
        return 0;
}
 
static int shaper_ioctl(struct device *dev,  struct ifreq *ifr, int cmd)
{
        struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_data;
        struct shaper *sh=dev->priv;
        switch(ss->ss_cmd)
        {
                case SHAPER_SET_DEV:
                {
                        struct device *them=dev_get(ss->ss_name);
                        if(them==NULL)
                                return -ENODEV;
                        if(sh->dev)
                                return -EBUSY;
                        return shaper_attach(dev,dev->priv, them);
                }
                case SHAPER_GET_DEV:
                        if(sh->dev==NULL)
                                return -ENODEV;
                        strcpy(ss->ss_name, sh->dev->name);
                        return 0;
                case SHAPER_SET_SPEED:
                        shaper_setspeed(sh,ss->ss_speed);
                        return 0;
                case SHAPER_GET_SPEED:
                        ss->ss_speed=sh->bitspersec;
                        return 0;
                default:
                        return -EINVAL;
        }
}
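
/*
 *      Illustrative userspace sketch (an assumption for illustration only,
 *      not part of this file, which defines just the kernel side): the
 *      shaper is expected to be configured through the device-private ioctl
 *      range, with a struct shaperconf laid over the ifreq data area much
 *      as the cast above does, roughly:
 *
 *              struct ifreq ifr;
 *              struct shaperconf *sc=(struct shaperconf *)&ifr.ifr_data;
 *              strcpy(ifr.ifr_name, "shaper0");
 *              sc->ss_cmd=SHAPER_SET_DEV;
 *              strcpy(sc->ss_name, "eth1");        -- attach to eth1
 *              ioctl(sock, SIOCDEVPRIVATE, &ifr);
 *              sc->ss_cmd=SHAPER_SET_SPEED;
 *              sc->ss_speed=64000;                 -- bits per second
 *              ioctl(sock, SIOCDEVPRIVATE, &ifr);
 *
 *      where "sock" is any open AF_INET socket and "eth1"/64000 are example
 *      values, not taken from this driver.
 */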
 
static struct shaper *shaper_alloc(struct device *dev)
{
        struct shaper *sh=kmalloc(sizeof(struct shaper), GFP_KERNEL);
        if(sh==NULL)
                return NULL;
        memset(sh,0,sizeof(*sh));
        skb_queue_head_init(&sh->sendq);
        init_timer(&sh->timer);
        sh->timer.function=shaper_timer;
        sh->timer.data=(unsigned long)sh;
        return sh;
}
 
/*
 *      Add a shaper device to the system
 */
 
int shaper_probe(struct device *dev)
{
        int i;
 
        /*
         *      Set up the shaper.
         */
 
        dev->priv = shaper_alloc(dev);
        if(dev->priv==NULL)
                return -ENOMEM;
 
        dev->open               = shaper_open;
        dev->stop               = shaper_close;
        dev->hard_start_xmit    = shaper_start_xmit;
        dev->get_stats          = shaper_get_stats;
        dev->set_multicast_list = NULL;
 
        /*
         *      Initialise the packet queues
         */
 
        for(i=0;i<DEV_NUMBUFFS;i++)
                skb_queue_head_init(&dev->buffs[i]);
 
        /*
         *      Handlers for when we attach to a device.
         */
 
        dev->hard_header        = shaper_header;
        dev->rebuild_header     = shaper_rebuild_header;
        dev->do_ioctl           = shaper_ioctl;
        dev->hard_header_len    = 0;
        dev->type               = ARPHRD_ETHER; /* initially */
        dev->set_mac_address    = NULL;
        dev->mtu                = 1500;
        dev->addr_len           = 0;
        dev->tx_queue_len       = 10;
        dev->flags              = 0;
        dev->family             = AF_INET;
        dev->pa_addr            = 0;
        dev->pa_brdaddr         = 0;
        dev->pa_mask            = 0;
        dev->pa_alen            = 4;
 
        /*
         *      Shaper is ok
         */
 
        return 0;
}
 
#ifdef MODULE
 
static char devicename[9];
 
static struct device dev_shape =
{
        devicename,
        0, 0, 0, 0,
        0, 0,
        0, 0, 0, NULL, shaper_probe
};
 
int init_module(void)
{
        int i;
        for(i=0;i<99;i++)
        {
                sprintf(devicename,"shaper%d",i);
                if(dev_get(devicename)==NULL)
                        break;
        }
        if(i==99)
                return -ENFILE;
 
        printk(SHAPER_BANNER);
        if (register_netdev(&dev_shape) != 0)
                return -EIO;
        printk("Traffic shaper initialised.\n");
        return 0;
}
 
void cleanup_module(void)
{
        /*
         *      No need to check MOD_IN_USE, as sys_delete_module() checks.
         *      To be unloadable we must be closed and detached, so we don't
         *      need to flush things.
         */
 
        unregister_netdev(&dev_shape);
 
        /*
         *      Free up the private structure, or leak memory :-)
         */
 
        kfree(dev_shape.priv);
        dev_shape.priv = NULL;
}
 
#else
 
static struct device dev_sh0 =
{
        "shaper0",
        0, 0, 0, 0,
        0, 0,
        0, 0, 0, NULL, shaper_probe
};
 
 
static struct device dev_sh1 =
{
        "shaper1",
        0, 0, 0, 0,
        0, 0,
        0, 0, 0, NULL, shaper_probe
};
 
 
static struct device dev_sh2 =
{
        "shaper2",
        0, 0, 0, 0,
        0, 0,
        0, 0, 0, NULL, shaper_probe
};
 
static struct device dev_sh3 =
{
        "shaper3",
        0, 0, 0, 0,
        0, 0,
        0, 0, 0, NULL, shaper_probe
};
 
void shaper_init(void)
{
        register_netdev(&dev_sh0);
        register_netdev(&dev_sh1);
        register_netdev(&dev_sh2);
        register_netdev(&dev_sh3);
}
 
#endif /* MODULE */
