/* SCTP kernel reference Implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static inline struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                                     struct sctp_ulpevent *);
static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
                                                    struct sctp_ulpevent *);

/* 1st Level Abstractions */
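
/* Event flow: each DATA chunk becomes a sctp_ulpevent, which is first
 * reassembled (the 'reasm' queue, sorted by TSN), then ordered within
 * its stream (the 'lobby' queue, sorted by stream id and SSN), and
 * finally handed to the owning socket: normally its receive queue, or
 * the pd_lobby while partial delivery is in progress (see
 * sctp_ulpq_tail_event()).
 */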

/* Create a new ULP queue.  */
struct sctp_ulpq *sctp_ulpq_new(struct sctp_association *asoc, int gfp)
{
        struct sctp_ulpq *ulpq;

        ulpq = kmalloc(sizeof(struct sctp_ulpq), gfp);
        if (!ulpq)
                goto fail;
        if (!sctp_ulpq_init(ulpq, asoc))
                goto fail_init;
        ulpq->malloced = 1;
        return ulpq;

fail_init:
        kfree(ulpq);
fail:
        return NULL;
}

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode  = 0;
        ulpq->malloced = 0;

        return ulpq;
}


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby))) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm))) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
        if (ulpq->malloced)
                kfree(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        int gfp)
{
        struct sk_buff_head temp;
        sctp_data_chunk_t *hdr;
        struct sctp_ulpevent *event;

        hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        /* Do reassembly if needed.  */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed.  */
        if ((event) && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));
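
                /* Ordering may release further in-sequence events from the
                 * lobby; sctp_ulpq_order() appends them to this temporary
                 * list through the skb's list pointer so that everything is
                 * delivered together below.
                 */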
                event = sctp_ulpq_order(ulpq, event);
        }

        /* Send event to the ULP.  */
        if (event)
                sctp_ulpq_tail_event(ulpq, event);

        return 0;
}

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk)
{
        struct sctp_opt *sp;
        sp = sctp_sk(sk);

        sp->pd_mode = 0;
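        /* Anything parked in the pd_lobby while partial delivery was in
         * progress is moved to the socket's receive queue, and the lobby
         * is reset to an empty list.
         */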
        if (!skb_queue_empty(&sp->pd_lobby)) {
                struct list_head *list;
                sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
                list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                INIT_LIST_HEAD(list);
                return 1;
        }
        return 0;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        return sctp_clear_pd(ulpq->asoc->base.sk);
}

/* Add a new event for propagation to the ULP.  */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sk_buff_head *queue;
        int clear_pd = 0;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sk->dead || (sk->sk_shutdown & RCV_SHUTDOWN))
                goto out_free;

        /* Check if the user wishes to receive this event.  */
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_
         * association is the cause of the partial delivery.
         */
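
        /* Queue selection, in short:
         *  1) Socket not in partial delivery: straight to the receive queue.
         *  2) This association owns the partial delivery: data goes to the
         *     receive queue (and MSG_EOR ends partial delivery), while
         *     notifications wait in the pd_lobby.
         *  3) Another association owns it: everything waits in the pd_lobby.
         */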
        if (!sctp_sk(sk)->pd_mode) {
                queue = &sk->sk_receive_queue;
        } else if (ulpq->pd_mode) {
                if (event->msg_flags & MSG_NOTIFICATION)
                        queue = &sctp_sk(sk)->pd_lobby;
                else {
                        clear_pd = event->msg_flags & MSG_EOR;
                        queue = &sk->sk_receive_queue;
                }
        } else
                queue = &sctp_sk(sk)->pd_lobby;


        /* If we are harvesting multiple skbs, they will be
         * collected on a list.
         */
        if (sctp_event2skb(event)->list)
                sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
        else
                __skb_queue_tail(queue, sctp_event2skb(event));

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue)
                sk->sk_data_ready(sk, 0);
        return 1;

out_free:
        if (sctp_event2skb(event)->list)
                sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
        else
                sctp_ulpevent_free(event);
        return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                         struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->sndrcvinfo.sinfo_tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit just dropping it at the end. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->sndrcvinfo.sinfo_tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list. We store them by TSN.  */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->sndrcvinfo.sinfo_tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);

}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next);

        /* Add the list of remaining fragments to the first fragment's
         * frag_list.
         */
        if (last)
                last->next = pos;
        else
                skb_shinfo(f_frag)->frag_list = pos;

        /* Remove the first fragment from the reassembly queue.  */
        __skb_unlink(f_frag, f_frag->list);
        while (pos) {

                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue.  */
                __skb_unlink(pos, pos->list);

                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        };

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(SctpReasmUsrMsgs);

        return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and next_tsn are reset when we find a chunk which
         * is the first fragment of a datagram. Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         */
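        /* For example, fragments with TSNs 5 (FIRST), 6 (MIDDLE) and
         * 7 (LAST) found in sequence complete one message; a MIDDLE or
         * LAST fragment whose TSN is not the expected next_tsn simply
         * resets the search.
         */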
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->sndrcvinfo.sinfo_tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if ((first_frag) && (ctsn == next_tsn))
                                next_tsn++;
                        else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                };

        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->sndrcvinfo.sinfo_tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn)
                                next_tsn++;
                        else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                };
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                                   struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message.  */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
                ctsn = event->sndrcvinfo.sinfo_tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->sndrcvinfo.sinfo_tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                default:
                        return NULL;
                };
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(first_frag, last_frag);
        return retval;
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *event)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
        __u16 sid, csid;
        __u16 ssn, cssn;

        sid = event->sndrcvinfo.sinfo_stream;
        ssn = event->sndrcvinfo.sinfo_ssn;
        in  = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->sndrcvinfo.sinfo_stream;
                cssn = cevent->sndrcvinfo.sinfo_ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(in, sid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);

                __skb_unlink(pos, pos->list);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(sctp_event2skb(event)->list, pos);
        }
}

/* Helper function to store chunks needing ordering.  */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                           struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->sndrcvinfo.sinfo_stream;
        ssn = event->sndrcvinfo.sinfo_ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->sndrcvinfo.sinfo_stream;
        cssn = cevent->sndrcvinfo.sinfo_ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->sndrcvinfo.sinfo_stream;
                cssn = cevent->sndrcvinfo.sinfo_ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }


        /* Insert before pos. */
        __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);

}

static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                        struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *in;

        /* Check if this message needs ordering.  */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
                return event;

        /* Note: The stream ID must be verified before this routine.  */
        sid = event->sndrcvinfo.sinfo_stream;
        ssn = event->sndrcvinfo.sinfo_ssn;
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this the expected SSN for this stream ID?  */
        if (ssn != sctp_ssn_peek(in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found.  */
        sctp_ssn_next(in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn;
        struct sk_buff *skb;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
                freed += skb_headlen(skb);
                event = sctp_skb2event(skb);
                tsn = event->sndrcvinfo.sinfo_tsn;

                sctp_ulpevent_free(event);
                sctp_tsnmap_renege(tsnmap, tsn);
                if (freed >= needed)
                        return freed;
        }

        return freed;
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn;
        struct sk_buff *skb;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        /* Walk backwards through the list, reneging the newest TSNs. */
        while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
                freed += skb_headlen(skb);
                event = sctp_skb2event(skb);
                tsn = event->sndrcvinfo.sinfo_tsn;

                sctp_ulpevent_free(event);
                sctp_tsnmap_renege(tsnmap, tsn);
                if (freed >= needed)
                        return freed;
        }

        return freed;
}

/* Partially deliver the first message, as there is pressure on the rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                struct sctp_chunk *chunk, int gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;

        asoc = ulpq->asoc;

        /* Are we already in partial delivery mode?  */
        if (!sctp_sk(asoc->base.sk)->pd_mode) {

                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.  */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_sk(asoc->base.sk)->pd_mode = 1;
                        ulpq->pd_mode = 1;
                        return;
                }
        }
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      int gfp)
{
        struct sctp_association *asoc;
        __u16 needed, freed;

        asoc = ulpq->asoc;
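
        /* 'needed' is the room required by the incoming chunk's payload:
         * its chunk length less the DATA chunk header.  With no chunk in
         * hand we fall back to reneging up to a full default window.
         */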
        if (chunk) {
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(sctp_data_chunk_t);
        } else
                needed = SCTP_DEFAULT_MAXWINDOW;

        freed = 0;

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed) {
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
                }
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
                sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);

                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
        }

        return;
}


/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if (sctp_ulpq_clear_pd(ulpq) || ev)
                sk->sk_data_ready(sk, 0);
}
