/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
        unsigned int    quadlen = XDR_QUADLEN(obj->len);

        p[quadlen] = 0;          /* zero trailing bytes */
        *p++ = htonl(obj->len);
        memcpy(p, obj->data, obj->len);
        return p + XDR_QUADLEN(obj->len);
}

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
        unsigned int    len;

        if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
                return NULL;
        obj->len  = len;
        obj->data = (u8 *) p;
        return p + XDR_QUADLEN(len);
}
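
/*
 * Editor's illustrative sketch (not part of the original file): a
 * round trip through xdr_encode_netobj()/xdr_decode_netobj() over a
 * flat scratch buffer.  The buffer size and the payload bytes are
 * assumptions made only for this example.
 */
static int example_netobj_roundtrip(void)
{
        __be32 scratch[16];
        u8 payload[4] = { 0xde, 0xad, 0xbe, 0xef };
        struct xdr_netobj in = { .len = sizeof(payload), .data = payload };
        struct xdr_netobj out;

        /* Wire format: a 4-byte length word, then data padded to a quad. */
        xdr_encode_netobj(scratch, &in);
        if (xdr_decode_netobj(scratch, &out) == NULL)
                return -EINVAL;         /* length exceeded XDR_MAX_NETOBJ */
        /* out.data points into scratch, just past the length word. */
        return (out.len == in.len) ? 0 : -EINVAL;
}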

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
        if (likely(nbytes != 0)) {
                unsigned int quadlen = XDR_QUADLEN(nbytes);
                unsigned int padding = (quadlen << 2) - nbytes;

                if (ptr != NULL)
                        memcpy(p, ptr, nbytes);
                if (padding != 0)
                        memset((char *)p + nbytes, 0, padding);
                p += quadlen;
        }
        return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
        *p++ = htonl(nbytes);
        return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
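
/*
 * Editor's illustrative sketch (not part of the original file): the
 * padding arithmetic above, worked for nbytes = 5.  XDR_QUADLEN(5) = 2,
 * so the data occupies two quads (8 bytes) and (2 << 2) - 5 = 3 pad
 * bytes are zeroed; a variable-length opaque adds a 4-byte length word.
 */
static __be32 *example_encode_hello(__be32 *p)
{
        /* Emits 12 bytes: 00 00 00 05  'h' 'e' 'l' 'l' 'o'  00 00 00 */
        return xdr_encode_opaque(p, "hello", 5);
}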

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
        return xdr_encode_array(p, string, strlen(string));
}

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp, int *lenp, int maxlen)
{
        unsigned int    len;

        if ((len = ntohl(*p++)) > maxlen)
                return NULL;
        *lenp = len;
        *sp = (char *) p;
        return p + XDR_QUADLEN(len);
}
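
/*
 * Editor's illustrative sketch (not part of the original file):
 * decoding a string without copying it.  The 64-byte cap is an
 * assumption for the example; note the result is NOT NUL-terminated,
 * it points straight into the XDR buffer.
 */
static __be32 *example_decode_name(__be32 *p, char **name, int *namelen)
{
        /* Returns NULL if the on-the-wire length exceeds the cap. */
        return xdr_decode_string_inplace(p, name, namelen, 64);
}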

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
                 unsigned int len)
{
        struct kvec *tail = xdr->tail;
        u32 *p;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
        tail->iov_base = p;
        tail->iov_len = 0;

        if (len & 3) {
                unsigned int pad = 4 - (len & 3);

                *p = 0;
                tail->iov_base = (char *)p + (len & 3);
                tail->iov_len  = pad;
                len += pad;
        }
        xdr->buflen += len;
        xdr->len += len;
}
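
/*
 * Editor's illustrative sketch (not part of the original file): how
 * xdr_encode_pages() completes the three-part xdr_buf.  head[0] keeps
 * the words already encoded, the page vector carries the bulk payload,
 * and tail[0] ends up holding only the zero padding needed to keep the
 * stream quad-aligned (e.g. len = 10 leaves a 2-byte tail).
 */
static void example_attach_payload(struct xdr_buf *buf,
                                   struct page **pages, unsigned int len)
{
        /* base 0: the payload starts at the beginning of the first page. */
        xdr_encode_pages(buf, pages, 0, len);
}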

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
                 struct page **pages, unsigned int base, unsigned int len)
{
        struct kvec *head = xdr->head;
        struct kvec *tail = xdr->tail;
        char *buf = (char *)head->iov_base;
        unsigned int buflen = head->iov_len;

        head->iov_len  = offset;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        tail->iov_base = buf + offset;
        tail->iov_len = buflen - offset;

        xdr->buflen += len;
}

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *       they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
                size_t pgfrom_base, size_t len)
{
        struct page **pgfrom, **pgto;
        char *vfrom, *vto;
        size_t copy;

        BUG_ON(pgto_base <= pgfrom_base);

        pgto_base += len;
        pgfrom_base += len;

        pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
        pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

        pgto_base &= ~PAGE_CACHE_MASK;
        pgfrom_base &= ~PAGE_CACHE_MASK;

        do {
                /* Are any pointers crossing a page boundary? */
                if (pgto_base == 0) {
                        pgto_base = PAGE_CACHE_SIZE;
                        pgto--;
                }
                if (pgfrom_base == 0) {
                        pgfrom_base = PAGE_CACHE_SIZE;
                        pgfrom--;
                }

                copy = len;
                if (copy > pgto_base)
                        copy = pgto_base;
                if (copy > pgfrom_base)
                        copy = pgfrom_base;
                pgto_base -= copy;
                pgfrom_base -= copy;

                vto = kmap_atomic(*pgto, KM_USER0);
                vfrom = kmap_atomic(*pgfrom, KM_USER1);
                memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
                flush_dcache_page(*pgto);
                kunmap_atomic(vfrom, KM_USER1);
                kunmap_atomic(vto, KM_USER0);

        } while ((len -= copy) != 0);
}
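
/*
 * Editor's illustrative sketch (not part of the original file): the
 * page-vector address convention used above.  A linear offset splits
 * into a page index (high bits) and an offset within that page (low
 * bits); with 4K pages, offset 5000 is byte 904 of pages[1].
 */
static char *example_page_address(struct page **pages, size_t offset)
{
        struct page *pg = pages[offset >> PAGE_CACHE_SHIFT];
        size_t off_in_page = offset & ~PAGE_CACHE_MASK;

        /* The helpers above kmap_atomic() the page instead; this
         * assumes a lowmem page purely for simplicity. */
        return (char *)page_address(pg) + off_in_page;
}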

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages.
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
        struct page **pgto;
        char *vto;
        size_t copy;

        pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        do {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vto = kmap_atomic(*pgto, KM_USER0);
                memcpy(vto + pgbase, p, copy);
                kunmap_atomic(vto, KM_USER0);

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        flush_dcache_page(*pgto);
                        pgbase = 0;
                        pgto++;
                }
                p += copy;

        } while ((len -= copy) != 0);
        flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages.
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
        struct page **pgfrom;
        char *vfrom;
        size_t copy;

        pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;

        do {
                copy = PAGE_CACHE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vfrom = kmap_atomic(*pgfrom, KM_USER0);
                memcpy(p, vfrom + pgbase, copy);
                kunmap_atomic(vfrom, KM_USER0);

                pgbase += copy;
                if (pgbase == PAGE_CACHE_SIZE) {
                        pgbase = 0;
                        pgfrom++;
                }
                p += copy;

        } while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
        struct kvec *head, *tail;
        size_t copy, offs;
        unsigned int pglen = buf->page_len;

        tail = buf->tail;
        head = buf->head;
        BUG_ON(len > head->iov_len);

        /* Shift the tail first */
        if (tail->iov_len != 0) {
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove((char *)tail->iov_base + len,
                                        tail->iov_base, copy);
                }
                /* Copy from the inlined pages into the tail */
                copy = len;
                if (copy > pglen)
                        copy = pglen;
                offs = len - copy;
                if (offs >= tail->iov_len)
                        copy = 0;
                else if (copy > tail->iov_len - offs)
                        copy = tail->iov_len - offs;
                if (copy != 0)
                        _copy_from_pages((char *)tail->iov_base + offs,
                                        buf->pages,
                                        buf->page_base + pglen + offs - len,
                                        copy);
                /* Do we also need to copy data from the head into the tail? */
                if (len > pglen) {
                        offs = copy = len - pglen;
                        if (copy > tail->iov_len)
                                copy = tail->iov_len;
                        memcpy(tail->iov_base,
                                        (char *)head->iov_base +
                                        head->iov_len - offs,
                                        copy);
                }
        }
        /* Now handle pages */
        if (pglen != 0) {
                if (pglen > len)
                        _shift_data_right_pages(buf->pages,
                                        buf->page_base + len,
                                        buf->page_base,
                                        pglen - len);
                copy = len;
                if (len > pglen)
                        copy = pglen;
                _copy_to_pages(buf->pages, buf->page_base,
                                (char *)head->iov_base + head->iov_len - len,
                                copy);
        }
        head->iov_len -= len;
        buf->buflen -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
        struct kvec *tail;
        size_t copy;
        char *p;
        unsigned int pglen = buf->page_len;

        tail = buf->tail;
        BUG_ON(len > pglen);

        /* Shift the tail first */
        if (tail->iov_len != 0) {
                p = (char *)tail->iov_base + len;
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove(p, tail->iov_base, copy);
                } else
                        buf->buflen -= len;
                /* Copy from the inlined pages into the tail */
                copy = len;
                if (copy > tail->iov_len)
                        copy = tail->iov_len;
                _copy_from_pages((char *)tail->iov_base,
                                buf->pages, buf->page_base + pglen - len,
                                copy);
        }
        buf->page_len -= len;
        buf->buflen -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
        xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        struct kvec *iov = buf->head;
        int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

        BUG_ON(scratch_len < 0);
        xdr->buf = buf;
        xdr->iov = iov;
        xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
        xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
        BUG_ON(iov->iov_len > scratch_len);

        if (p != xdr->p && p != NULL) {
                size_t len;

                BUG_ON(p < xdr->p || p > xdr->end);
                len = (char *)p - (char *)xdr->p;
                xdr->p = p;
                buf->len += len;
                iov->iov_len += len;
        }
}
EXPORT_SYMBOL(xdr_init_encode);
458
 
459
/**
460
 * xdr_reserve_space - Reserve buffer space for sending
461
 * @xdr: pointer to xdr_stream
462
 * @nbytes: number of bytes to reserve
463
 *
464
 * Checks that we have enough buffer space to encode 'nbytes' more
465
 * bytes of data. If so, update the total xdr_buf length, and
466
 * adjust the length of the current kvec.
467
 */
468
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
469
{
470
        __be32 *p = xdr->p;
471
        __be32 *q;
472
 
473
        /* align nbytes on the next 32-bit boundary */
474
        nbytes += 3;
475
        nbytes &= ~3;
476
        q = p + (nbytes >> 2);
477
        if (unlikely(q > xdr->end || q < p))
478
                return NULL;
479
        xdr->p = q;
480
        xdr->iov->iov_len += nbytes;
481
        xdr->buf->len += nbytes;
482
        return p;
483
}
484
EXPORT_SYMBOL(xdr_reserve_space);
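
/*
 * Editor's illustrative sketch (not part of the original file): the
 * typical encode-side pattern.  The caller owns 'buf', whose head kvec
 * must already point at backing storage; the two words written here
 * (a hypothetical count and flag word) exist only for the example.
 */
static int example_encode(struct xdr_buf *buf)
{
        struct xdr_stream xdr;
        __be32 *p;

        xdr_init_encode(&xdr, buf, NULL);
        p = xdr_reserve_space(&xdr, 2 * sizeof(__be32));
        if (p == NULL)
                return -EMSGSIZE;       /* would run past xdr->end */
        *p++ = htonl(1);                /* count */
        *p++ = htonl(0);                /* flags */
        return 0;
}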
485
 
486
/**
487
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
488
 * @xdr: pointer to xdr_stream
489
 * @pages: list of pages
490
 * @base: offset of first byte
491
 * @len: length of data in bytes
492
 *
493
 */
494
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
495
                 unsigned int len)
496
{
497
        struct xdr_buf *buf = xdr->buf;
498
        struct kvec *iov = buf->tail;
499
        buf->pages = pages;
500
        buf->page_base = base;
501
        buf->page_len = len;
502
 
503
        iov->iov_base = (char *)xdr->p;
504
        iov->iov_len  = 0;
505
        xdr->iov = iov;
506
 
507
        if (len & 3) {
508
                unsigned int pad = 4 - (len & 3);
509
 
510
                BUG_ON(xdr->p >= xdr->end);
511
                iov->iov_base = (char *)xdr->p + (len & 3);
512
                iov->iov_len  += pad;
513
                len += pad;
514
                *xdr->p++ = 0;
515
        }
516
        buf->buflen += len;
517
        buf->len += len;
518
}
519
EXPORT_SYMBOL(xdr_write_pages);
520
 
521
/**
522
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
523
 * @xdr: pointer to xdr_stream struct
524
 * @buf: pointer to XDR buffer from which to decode data
525
 * @p: current pointer inside XDR buffer
526
 */
527
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
528
{
529
        struct kvec *iov = buf->head;
530
        unsigned int len = iov->iov_len;
531
 
532
        if (len > buf->len)
533
                len = buf->len;
534
        xdr->buf = buf;
535
        xdr->iov = iov;
536
        xdr->p = p;
537
        xdr->end = (__be32 *)((char *)iov->iov_base + len);
538
}
539
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p = xdr->p;
        __be32 *q = p + XDR_QUADLEN(nbytes);

        if (unlikely(q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
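
/*
 * Editor's illustrative sketch (not part of the original file): the
 * matching decode-side pattern.  'buf' is a received message and 'p'
 * points at the first word to decode, typically buf->head[0].iov_base.
 */
static int example_decode(struct xdr_buf *buf, __be32 *p, u32 *count)
{
        struct xdr_stream xdr;

        xdr_init_decode(&xdr, buf, p);
        p = xdr_inline_decode(&xdr, sizeof(__be32));
        if (p == NULL)
                return -EIO;            /* message shorter than expected */
        *count = ntohl(*p);
        return 0;
}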

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov;
        ssize_t shift;
        unsigned int end;
        int padding;

        /* Realign pages to current pointer position */
        iov  = buf->head;
        shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
        if (shift > 0)
                xdr_shrink_bufhead(buf, shift);

        /* Truncate page data and move it into the tail */
        if (buf->page_len > len)
                xdr_shrink_pagelen(buf, buf->page_len - len);
        padding = (XDR_QUADLEN(len) << 2) - len;
        xdr->iov = iov = buf->tail;
        /* Compute remaining message length.  */
        end = iov->iov_len;
        shift = buf->buflen - buf->len;
        if (shift < end)
                end -= shift;
        else if (shift > 0)
                end = 0;
        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        xdr->p = (__be32 *)((char *)iov->iov_base + padding);
        xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
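
/*
 * Editor's illustrative sketch (not part of the original file): a
 * READ-style reply whose opaque payload was received into the page
 * list.  After the length word is pulled from the head,
 * xdr_read_pages() realigns the buffer so page data begins at the
 * current position and leaves xdr->p in the tail, past the padding.
 */
static int example_decode_read_reply(struct xdr_stream *xdr, u32 *count)
{
        __be32 *p = xdr_inline_decode(xdr, sizeof(__be32));

        if (p == NULL)
                return -EIO;
        *count = ntohl(*p);
        xdr_read_pages(xdr, *count);
        return 0;
}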

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
        char *kaddr = page_address(xdr->buf->pages[0]);
        xdr_read_pages(xdr, len);
        /*
         * Position the current pointer at the beginning of the first
         * page of data, clamping the decode window to that page.
         */
        if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
                len = PAGE_CACHE_SIZE - xdr->buf->page_base;
        xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
        xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
        buf->head[0] = *iov;
        buf->tail[0] = empty_iov;
        buf->page_len = 0;
        buf->buflen = buf->len = iov->iov_len;
}

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length is out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                        unsigned int base, unsigned int len)
{
        subbuf->buflen = subbuf->len = len;
        if (base < buf->head[0].iov_len) {
                subbuf->head[0].iov_base = buf->head[0].iov_base + base;
                subbuf->head[0].iov_len = min_t(unsigned int, len,
                                                buf->head[0].iov_len - base);
                len -= subbuf->head[0].iov_len;
                base = 0;
        } else {
                subbuf->head[0].iov_base = NULL;
                subbuf->head[0].iov_len = 0;
                base -= buf->head[0].iov_len;
        }

        if (base < buf->page_len) {
                subbuf->page_len = min(buf->page_len - base, len);
                base += buf->page_base;
                subbuf->page_base = base & ~PAGE_CACHE_MASK;
                subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
                len -= subbuf->page_len;
                base = 0;
        } else {
                base -= buf->page_len;
                subbuf->page_len = 0;
        }

        if (base < buf->tail[0].iov_len) {
                subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
                subbuf->tail[0].iov_len = min_t(unsigned int, len,
                                                buf->tail[0].iov_len - base);
                len -= subbuf->tail[0].iov_len;
                base = 0;
        } else {
                subbuf->tail[0].iov_base = NULL;
                subbuf->tail[0].iov_len = 0;
                base -= buf->tail[0].iov_len;
        }

        if (base || len)
                return -1;
        return 0;
}

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(obj, subbuf->head[0].iov_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __read_bytes_from_xdr_buf(&subbuf, obj, len);
        return 0;
}
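
/*
 * Editor's illustrative sketch (not part of the original file):
 * pulling a fixed-size value out of an xdr_buf regardless of whether
 * it straddles the head/page/tail boundaries.  The 64-bit "cookie" is
 * a hypothetical field invented for the example.
 */
static int example_read_cookie(struct xdr_buf *buf, unsigned int base,
                               u64 *cookie)
{
        __be64 raw;
        int err;

        err = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(raw));
        if (err)
                return err;             /* base/len out of bounds */
        *cookie = be64_to_cpu(raw);
        return 0;
}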

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(subbuf->head[0].iov_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __write_bytes_to_xdr_buf(&subbuf, obj, len);
        return 0;
}

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
        __be32  raw;
        int     status;

        status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
        if (status)
                return status;
        *obj = ntohl(raw);
        return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
        __be32  raw = htonl(obj);

        return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
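
/*
 * Editor's illustrative sketch (not part of the original file): the
 * word helpers are just the 4-byte case of the routines above, with
 * byte order handled for you.  This bumps a counter in place.
 */
static int example_bump_word(struct xdr_buf *buf, unsigned int base)
{
        u32 v;
        int err;

        err = xdr_decode_word(buf, base, &v);
        if (err)
                return err;
        return xdr_encode_word(buf, base, v + 1);
}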

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
        struct xdr_buf subbuf;

        if (xdr_decode_word(buf, offset, &obj->len))
                return -EFAULT;
        if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
                return -EFAULT;

        /* Is the obj contained entirely in the head? */
        obj->data = subbuf.head[0].iov_base;
        if (subbuf.head[0].iov_len == obj->len)
                return 0;
        /* ..or is the obj contained entirely in the tail? */
        obj->data = subbuf.tail[0].iov_base;
        if (subbuf.tail[0].iov_len == obj->len)
                return 0;

        /* use end of tail as storage for obj:
         * (We don't copy to the beginning because then we'd have
         * to worry about doing a potentially overlapping copy.
         * This assumes the object is at most half the length of the
         * tail.) */
        if (obj->len > buf->buflen - buf->len)
                return -ENOMEM;
        if (buf->tail[0].iov_len != 0)
                obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
        else
                obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
        __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
        return 0;
}

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
                 struct xdr_array2_desc *desc, int encode)
{
        char *elem = NULL, *c;
        unsigned int copied = 0, todo, avail_here;
        struct page **ppages = NULL;
        int err;

        if (encode) {
                if (xdr_encode_word(buf, base, desc->array_len) != 0)
                        return -EINVAL;
        } else {
                if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
                    desc->array_len > desc->array_maxlen ||
                    (unsigned long) base + 4 + desc->array_len *
                                    desc->elem_size > buf->len)
                        return -EINVAL;
        }
        base += 4;

        if (!desc->xcode)
                return 0;

        todo = desc->array_len * desc->elem_size;

        /* process head */
        if (todo && base < buf->head->iov_len) {
                c = buf->head->iov_base + base;
                avail_here = min_t(unsigned int, todo,
                                   buf->head->iov_len - base);
                todo -= avail_here;

                while (avail_here >= desc->elem_size) {
                        err = desc->xcode(desc, c);
                        if (err)
                                goto out;
                        c += desc->elem_size;
                        avail_here -= desc->elem_size;
                }
                if (avail_here) {
                        if (!elem) {
                                elem = kmalloc(desc->elem_size, GFP_KERNEL);
                                err = -ENOMEM;
                                if (!elem)
                                        goto out;
                        }
                        if (encode) {
                                err = desc->xcode(desc, elem);
                                if (err)
                                        goto out;
                                memcpy(c, elem, avail_here);
                        } else
                                memcpy(elem, c, avail_here);
                        copied = avail_here;
                }
                base = buf->head->iov_len;  /* align to start of pages */
        }

        /* process pages array */
        base -= buf->head->iov_len;
        if (todo && base < buf->page_len) {
                unsigned int avail_page;

                avail_here = min(todo, buf->page_len - base);
                todo -= avail_here;

                base += buf->page_base;
                ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
                base &= ~PAGE_CACHE_MASK;
                avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
                                        avail_here);
                c = kmap(*ppages) + base;

                while (avail_here) {
                        avail_here -= avail_page;
                        if (copied || avail_page < desc->elem_size) {
                                unsigned int l = min(avail_page,
                                        desc->elem_size - copied);
                                if (!elem) {
                                        elem = kmalloc(desc->elem_size,
                                                       GFP_KERNEL);
                                        err = -ENOMEM;
                                        if (!elem)
                                                goto out;
                                }
                                if (encode) {
                                        if (!copied) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                        }
                                        memcpy(c, elem + copied, l);
                                        copied += l;
                                        if (copied == desc->elem_size)
                                                copied = 0;
                                } else {
                                        memcpy(elem + copied, c, l);
                                        copied += l;
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                                copied = 0;
                                        }
                                }
                                avail_page -= l;
                                c += l;
                        }
                        while (avail_page >= desc->elem_size) {
                                err = desc->xcode(desc, c);
                                if (err)
                                        goto out;
                                c += desc->elem_size;
                                avail_page -= desc->elem_size;
                        }
                        if (avail_page) {
                                unsigned int l = min(avail_page,
                                            desc->elem_size - copied);
                                if (!elem) {
                                        elem = kmalloc(desc->elem_size,
                                                       GFP_KERNEL);
                                        err = -ENOMEM;
                                        if (!elem)
                                                goto out;
                                }
                                if (encode) {
                                        if (!copied) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                        }
                                        memcpy(c, elem + copied, l);
                                        copied += l;
                                        if (copied == desc->elem_size)
                                                copied = 0;
                                } else {
                                        memcpy(elem + copied, c, l);
                                        copied += l;
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                                if (err)
                                                        goto out;
                                                copied = 0;
                                        }
                                }
                        }
                        if (avail_here) {
                                kunmap(*ppages);
                                ppages++;
                                c = kmap(*ppages);
                        }

                        avail_page = min(avail_here,
                                 (unsigned int) PAGE_CACHE_SIZE);
                }
                base = buf->page_len;  /* align to start of tail */
        }

        /* process tail */
        base -= buf->page_len;
        if (todo) {
                c = buf->tail->iov_base + base;
                if (copied) {
                        unsigned int l = desc->elem_size - copied;

                        if (encode)
                                memcpy(c, elem + copied, l);
                        else {
                                memcpy(elem + copied, c, l);
                                err = desc->xcode(desc, elem);
                                if (err)
                                        goto out;
                        }
                        todo -= l;
                        c += l;
                }
                while (todo) {
                        err = desc->xcode(desc, c);
                        if (err)
                                goto out;
                        c += desc->elem_size;
                        todo -= desc->elem_size;
                }
        }
        err = 0;

out:
        kfree(elem);
        if (ppages)
                kunmap(*ppages);
        return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if (base >= buf->len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
            buf->head->iov_len + buf->page_len + buf->tail->iov_len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 1);
}
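
/*
 * Editor's illustrative sketch (not part of the original file): a
 * minimal xdr_array2_desc user.  The xcode callback is invoked once
 * per element with 'elem' pointing at elem_size contiguous bytes
 * (possibly a bounce buffer when the element straddled a page).  The
 * element type and the maximum of 16 entries are assumptions.
 */
static int example_xcode_u32(struct xdr_array2_desc *desc, void *elem)
{
        u32 value = ntohl(*(__be32 *)elem);

        return value == 0 ? -EINVAL : 0;        /* reject zero entries */
}

static int example_decode_u32_array(struct xdr_buf *buf, unsigned int base)
{
        struct xdr_array2_desc desc = {
                .elem_size    = sizeof(__be32),
                .array_maxlen = 16,
                .xcode        = example_xcode_u32,
        };

        /* On decode, desc.array_len is read from the buffer itself. */
        return xdr_decode_array2(buf, base, &desc);
}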

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                int (*actor)(struct scatterlist *, void *), void *data)
{
        int i, ret = 0;
        unsigned page_len, thislen, page_offset;
        struct scatterlist      sg[1];

        sg_init_table(sg, 1);

        if (offset >= buf->head[0].iov_len) {
                offset -= buf->head[0].iov_len;
        } else {
                thislen = buf->head[0].iov_len - offset;
                if (thislen > len)
                        thislen = len;
                sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
                ret = actor(sg, data);
                if (ret)
                        goto out;
                offset = 0;
                len -= thislen;
        }
        if (len == 0)
                goto out;

        if (offset >= buf->page_len) {
                offset -= buf->page_len;
        } else {
                page_len = buf->page_len - offset;
                if (page_len > len)
                        page_len = len;
                len -= page_len;
                page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
                i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
                thislen = PAGE_CACHE_SIZE - page_offset;
                do {
                        if (thislen > page_len)
                                thislen = page_len;
                        sg_set_page(sg, buf->pages[i], thislen, page_offset);
                        ret = actor(sg, data);
                        if (ret)
                                goto out;
                        page_len -= thislen;
                        i++;
                        page_offset = 0;
                        thislen = PAGE_CACHE_SIZE;
                } while (page_len != 0);
                offset = 0;
        }
        if (len == 0)
                goto out;
        if (offset < buf->tail[0].iov_len) {
                thislen = buf->tail[0].iov_len - offset;
                if (thislen > len)
                        thislen = len;
                sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
                ret = actor(sg, data);
                len -= thislen;
        }
        if (len != 0)
                ret = -EINVAL;
out:
        return ret;
}
EXPORT_SYMBOL(xdr_process_buf);
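
/*
 * Editor's illustrative sketch (not part of the original file): an
 * actor that just totals the byte lengths it is handed, one
 * scatterlist entry at a time; checksumming or encryption callbacks
 * plug in the same way.
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
        *(unsigned int *)data += sg->length;
        return 0;               /* non-zero would abort the walk */
}

static int example_count_bytes(struct xdr_buf *buf, unsigned int len,
                               unsigned int *total)
{
        *total = 0;
        return xdr_process_buf(buf, 0, len, example_count_actor, total);
}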