/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"

enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,
        BLKCIPHER_WALK_SLOW = 1 << 1,
        BLKCIPHER_WALK_COPY = 1 << 2,
        BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr, 1);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
        return max(start, end_page);
}
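
/*
 * Worked example (editorial, assuming PAGE_SIZE == 4096): with
 * start == 0x12ff8 and len == 16, start + len - 1 == 0x13007 lies on
 * the next page, so end_page == 0x13000 > start and max() moves the
 * spot up to the page start.  If [start, start + len) fits within
 * start's page, end_page <= start and the spot stays at start.
 */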

static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
                                               struct blkcipher_walk *walk,
                                               unsigned int bsize)
{
        u8 *addr;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
        return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
                                               unsigned int n)
{
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                blkcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
                blkcipher_unmap_src(walk);
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}
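
/*
 * Editorial note: on success, callers pass as err the number of bytes
 * of the current chunk they did not process (0 when the whole chunk
 * was consumed), or a negative errno on failure; compare the loop in
 * callers such as crypto/cbc.c.  A slow-path chunk must be consumed in
 * full, hence the WARN_ON(err) below.
 */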
int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
                        n = blkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = blkcipher_done_slow(tfm, walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(desc->flags);
                return blkcipher_walk_next(desc, walk);
        }

err:
        if (walk->iv != desc->info)
                memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

        return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
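
/*
 * Editorial note on the buffer size computed below: the slow path
 * needs two aligned, non-page-straddling spots of bsize bytes (dst,
 * then src at dst + aligned_bsize), and blkcipher_get_spot() may push
 * each spot forward; 3 * aligned_bsize minus the alignment slack
 * appears to cover the worst case.
 */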
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = aligned_bsize * 3 - (alignmask + 1) +
            (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
                                                 aligned_bsize, bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
        u8 *tmp = walk->page;

        blkcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        blkcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & BLKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        blkcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= BLKCIPHER_WALK_DIFF;
                blkcipher_map_dst(walk);
        }

        return 0;
}
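
/*
 * Editorial summary: for each chunk, blkcipher_walk_next() picks one of
 * three strategies.  Fast: map the scatterlist data in place.  Copy:
 * bounce the chunk through walk->page when src/dst do not satisfy the
 * algorithm's alignmask.  Slow: bounce a single block through an
 * aligned buffer when less than a full block is contiguous in the
 * scatterlists.
 */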
static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
        unsigned int bsize;
        unsigned int n;
        int err;

        n = walk->total;
        if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
                desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }

        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                walk->flags |= BLKCIPHER_WALK_COPY;
                if (!walk->page) {
                        walk->page = (void *)__get_free_page(GFP_ATOMIC);
                        if (!walk->page)
                                n = 0;
                }
        }

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                err = blkcipher_next_slow(desc, walk, bsize, alignmask);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                err = blkcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
        if (walk->flags & BLKCIPHER_WALK_PHYS) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}
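
/*
 * Editorial note: the buffer allocated below holds two block-sized
 * scratch spots followed by the IV, each placed via
 * blkcipher_get_spot() so that none of them straddles a page; the
 * over-allocation accounts for the alignment and page-bumping slack.
 */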
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
                                    struct crypto_blkcipher *tfm,
                                    unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
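
/*
 * Usage sketch (editorial; mirrors the pattern used by mode drivers
 * such as crypto/cbc.c, not part of the original file):
 *
 *	struct blkcipher_walk walk;
 *	unsigned int nbytes;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, total);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while ((nbytes = walk.nbytes)) {
 *		... process full blocks from walk.src.virt.addr into
 *		    walk.dst.virt.addr, leaving any tail in nbytes ...
 *		err = blkcipher_walk_done(desc, &walk, nbytes);
 *	}
 *	return err;
 */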

int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags |= BLKCIPHER_WALK_PHYS;
        walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->buffer = NULL;
        walk->iv = desc->info;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = blkcipher_copy_iv(walk, tfm, alignmask);
                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);
        walk->page = NULL;

        return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->blocksize = blocksize;
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
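
/*
 * Editorial note: when the caller's key buffer does not satisfy the
 * algorithm's alignmask, setkey_unaligned() below bounces the key
 * through a kmalloc'd buffer with enough headroom (keylen + alignmask)
 * to realign it, and wipes the copy before freeing it.
 */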
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

        return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
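
/*
 * Editorial note: the type/mask dance below tests whether the user
 * requested the synchronous interface (CRYPTO_ALG_ASYNC clear in type
 * while selected by mask); only then is room for the IV appended after
 * the aligned context, matching the address computed in
 * crypto_init_blkcipher_ops_sync().
 */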
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                             u32 mask)
{
        struct blkcipher_alg *cipher = &alg->cra_blkcipher;
        unsigned int len = alg->cra_ctxsize;

        type ^= CRYPTO_ALG_ASYNC;
        mask &= CRYPTO_ALG_ASYNC;
        if ((type & mask) && cipher->ivsize) {
                len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
                len += cipher->ivsize;
        }

        return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        crt->setkey = async_setkey;
        crt->encrypt = async_encrypt;
        crt->decrypt = async_decrypt;
        crt->ivsize = alg->ivsize;

        return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
        struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
        unsigned long addr;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;

        addr = (unsigned long)crypto_tfm_ctx(tfm);
        addr = ALIGN(addr, align);
        addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
        crt->iv = (void *)addr;

        return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        type ^= CRYPTO_ALG_ASYNC;
        mask &= CRYPTO_ALG_ASYNC;
        if (type & mask)
                return crypto_init_blkcipher_ops_sync(tfm);
        else
                return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : blkcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
        seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
        seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}

const struct crypto_type crypto_blkcipher_type = {
        .ctxsize = crypto_blkcipher_ctxsize,
        .init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
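
/*
 * Caller-side sketch (editorial; a minimal synchronous use of the API
 * this file backs, for a kernel of this vintage, assuming key, iv and
 * the scatterlists are set up elsewhere):
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *	int err;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	crypto_blkcipher_setkey(tfm, key, keylen);
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *	err = crypto_blkcipher_encrypt(&desc, &sg_dst, &sg_src, nbytes);
 *	crypto_free_blkcipher(tfm);
 */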

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");
