/*
 * kernel/lvm-snap.c
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *               2000 - 2002 Heinz Mauelshagen, Sistina Software
 *
 * LVM snapshot driver is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * LVM snapshot driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

/*
 * Changelog
 *
 *    05/07/2000 - implemented persistent snapshot support
 *    23/11/2000 - used cpu_to_le64 rather than my own macro
 *    25/01/2001 - Put LockPage back in
 *    01/02/2001 - A dropped snapshot is now set as inactive
 *    14/02/2001 - tidied debug statements
 *    19/02/2001 - changed rawio calls to pass in preallocated buffer_heads
 *    26/02/2001 - introduced __brw_kiovec to remove a lot of conditional
 *                 compiles.
 *    07/03/2001 - fixed COW exception table not persistent on 2.2 (HM)
 *    12/03/2001 - lvm_pv_get_number changes:
 *                 o made it static
 *                 o renamed it to _pv_get_number
 *                 o pv number is returned in new uint * arg
 *                 o -1 returned on error
 *                 lvm_snapshot_fill_COW_table has a return value too.
 *    15/10/2001 - fix snapshot alignment problem [CM]
 *               - fix snapshot full oops (always check lv_block_exception) [CM]
 *    26/06/2002 - support for new list_move macro [patch@luckynet.dynu.com]
 *    26/07/2002 - removed conditional list_move macro because we will
 *                 discontinue LVM1 before 2.6 anyway
 *    27/08/2003 - fixed unsafe list handling in lvm_find_exception_table() [HM]
 *
 */

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/types.h>
#include <linux/iobuf.h>
#include <linux/lvm.h>
#include <linux/devfs_fs_kernel.h>


#include "lvm-internal.h"

static char *lvm_snap_version __attribute__ ((unused)) =
    "LVM " LVM_RELEASE_NAME " snapshot code (" LVM_RELEASE_DATE ")\n";


extern const char *const lvm_name;
extern int lvm_blocksizes[];

void lvm_snapshot_release(lv_t *);

static int _write_COW_table_block(vg_t * vg, lv_t * lv, int idx,
                                  const char **reason);
static void _disable_snapshot(vg_t * vg, lv_t * lv);

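/*
 * Thin wrapper around brw_kiovec(); the lv_t argument is unused but
 * keeps call sites uniform (introduced to remove conditional compiles,
 * see the changelog entry of 26/02/2001).
 */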
static inline int __brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
                               kdev_t dev, unsigned long b[], int size,
                               lv_t * lv)
{
        return brw_kiovec(rw, nr, iovec, dev, b, size);
}

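/*
 * Map the device of a physical volume to its PV number within vg.
 * Returns 0 and stores the number in *pvn on success, or -1 if no PV
 * in the VG has device rdev (the COW table is then probably corrupt).
 */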
static int _pv_get_number(vg_t * vg, kdev_t rdev, uint * pvn)
{
        uint p;
        for (p = 0; p < vg->pv_max; p++) {
                if (vg->pv[p] == NULL)
                        continue;

                if (vg->pv[p]->pv_dev == rdev)
                        break;
        }

        if (p >= vg->pv_max) {
                /* bad news, the snapshot COW table is probably corrupt */
                printk(KERN_ERR
                       "%s -- _pv_get_number failed for rdev = %u\n",
                       lvm_name, rdev);
                return -1;
        }

        *pvn = vg->pv[p]->pv_number;
        return 0;
}

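/* hash on the originating device and the chunk index within it */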
#define hashfn(dev,block,mask,chunk_size) \
        ((HASHDEV(dev)^((block)/(chunk_size))) & (mask))

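/*
 * Look up the exception remapping the chunk that starts at org_start
 * on org_dev, or return NULL if none exists.  A hit found deeper in a
 * bucket is moved to the bucket head so hot chunks are found faster.
 */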
static inline lv_block_exception_t *lvm_find_exception_table(kdev_t org_dev,
                                                             unsigned long org_start,
                                                             lv_t * lv)
{
        struct list_head *hash_table = lv->lv_snapshot_hash_table, *next, *n;
        unsigned long mask = lv->lv_snapshot_hash_mask;
        int chunk_size = lv->lv_chunk_size;
        lv_block_exception_t *ret;
        int i = 0;

        hash_table =
            &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
        ret = NULL;

        for (next = hash_table->next, n = next->next; next != hash_table;
             next = n, n = next->next) {
                lv_block_exception_t *exception;

                exception = list_entry(next, lv_block_exception_t, hash);
                if (exception->rsector_org == org_start &&
                    exception->rdev_org == org_dev) {
                        if (i) {
                                /* fun, isn't it? :) */
                                list_del(next);
                                list_add(next, hash_table);
                        }
                        ret = exception;
                        break;
                }
                i++;
        }
        return ret;
}

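/* enter an exception into the snapshot hash table */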
inline void lvm_hash_link(lv_block_exception_t * exception,
                          kdev_t org_dev, unsigned long org_start,
                          lv_t * lv)
{
        struct list_head *hash_table = lv->lv_snapshot_hash_table;
        unsigned long mask = lv->lv_snapshot_hash_mask;
        int chunk_size = lv->lv_chunk_size;

        if (!hash_table)
                BUG();
        hash_table =
            &hash_table[hashfn(org_dev, org_start, mask, chunk_size)];
        list_add(&exception->hash, hash_table);
}

/*
 * Determine if we already have a snapshot chunk for this block.
 * Return: 1 if the chunk already exists
 *         0 if we need to COW this block and allocate a new chunk
 *        -1 if the snapshot was disabled because it ran out of space
 *
 * We need to be holding at least a read lock on lv->lv_lock.
 */
int lvm_snapshot_remap_block(kdev_t * org_dev, unsigned long *org_sector,
                             unsigned long pe_start, lv_t * lv)
{
        int ret;
        unsigned long pe_off, pe_adjustment, __org_start;
        kdev_t __org_dev;
        int chunk_size = lv->lv_chunk_size;
        lv_block_exception_t *exception;

        if (!lv->lv_block_exception)
                return -1;

        pe_off = pe_start % chunk_size;
        pe_adjustment = (*org_sector - pe_off) % chunk_size;
        __org_start = *org_sector - pe_adjustment;
        __org_dev = *org_dev;
        ret = 0;
        exception = lvm_find_exception_table(__org_dev, __org_start, lv);
        if (exception) {
                *org_dev = exception->rdev_new;
                *org_sector = exception->rsector_new + pe_adjustment;
                ret = 1;
        }
        return ret;
}

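/*
 * Abandon a snapshot that ran out of exception space or hit an I/O
 * error: invalidate its buffers, disable it on disk, free its
 * resources and mark the LV inactive.
 */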
void lvm_drop_snapshot(vg_t * vg, lv_t * lv_snap, const char *reason)
{
        kdev_t last_dev;
        int i;

        /* no exception storage space available for this snapshot
           or error on this snapshot --> release it */
        invalidate_buffers(lv_snap->lv_dev);

        /* wipe the snapshot since it's inconsistent now */
        _disable_snapshot(vg, lv_snap);

        for (i = last_dev = 0; i < lv_snap->lv_remap_ptr; i++) {
                if (lv_snap->lv_block_exception[i].rdev_new != last_dev) {
                        last_dev = lv_snap->lv_block_exception[i].rdev_new;
                        invalidate_buffers(last_dev);
                }
        }

        lvm_snapshot_release(lv_snap);
        lv_snap->lv_status &= ~LV_ACTIVE;

        printk(KERN_INFO
               "%s -- giving up to snapshot %s on %s: %s\n",
               lvm_name, lv_snap->lv_snapshot_org->lv_name,
               lv_snap->lv_name, reason);
}

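/*
 * Translate a run of sectors starting at `start' into the block
 * numbers brw_kiovec() expects.  Returns 0 if start is not aligned to
 * the blocksize, 1 on success.
 */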
static inline int lvm_snapshot_prepare_blocks(unsigned long *blocks,
                                              unsigned long start,
                                              int nr_sectors,
                                              int blocksize)
{
        int i, sectors_per_block, nr_blocks;

        sectors_per_block = blocksize / SECTOR_SIZE;

        if (start & (sectors_per_block - 1))
                return 0;

        nr_blocks = nr_sectors / sectors_per_block;
        start /= sectors_per_block;

        for (i = 0; i < nr_blocks; i++)
                blocks[i] = start++;

        return 1;
}

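/* return the soft block size registered for dev (default BLOCK_SIZE) */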
inline int lvm_get_blksize(kdev_t dev)
{
        int correct_size = BLOCK_SIZE, i, major;

        major = MAJOR(dev);
        if (blksize_size[major]) {
                i = blksize_size[major][MINOR(dev)];
                if (i)
                        correct_size = i;
        }
        return correct_size;
}

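/*
 * Debug builds only: forget any buffers cached for a freshly COWed
 * range of the snapshot LV.
 */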
#ifdef DEBUG_SNAPSHOT
static inline void invalidate_snap_cache(unsigned long start,
                                         unsigned long nr, kdev_t dev)
{
        struct buffer_head *bh;
        int sectors_per_block, i, blksize, minor;

        minor = MINOR(dev);
        blksize = lvm_blocksizes[minor];
        sectors_per_block = blksize >> 9;
        nr /= sectors_per_block;
        start /= sectors_per_block;

        for (i = 0; i < nr; i++) {
                bh = get_hash_table(dev, start++, blksize);
                if (bh)
                        bforget(bh);
        }
}
#endif

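/*
 * Refill the in-core COW table page with the exceptions that belong to
 * the on-disk COW table block containing the most recent remap, so a
 * later partial write of that block carries the earlier entries too.
 * Returns 0 on success, -1 on error.
 */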
int lvm_snapshot_fill_COW_page(vg_t * vg, lv_t * lv_snap)
{
        int id = 0, is = lv_snap->lv_remap_ptr;
        ulong blksize_snap;
        lv_COW_table_disk_t *lv_COW_table = (lv_COW_table_disk_t *)
            page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);

        if (is == 0)
                return 0;

        is--;
        blksize_snap =
            lvm_get_blksize(lv_snap->lv_block_exception[is].rdev_new);
        is -= is % (blksize_snap / sizeof(lv_COW_table_disk_t));

        memset(lv_COW_table, 0, blksize_snap);
        for (; is < lv_snap->lv_remap_ptr; is++, id++) {
                /* store new COW_table entry */
                lv_block_exception_t *be =
                    lv_snap->lv_block_exception + is;
                uint pvn;

                if (_pv_get_number(vg, be->rdev_org, &pvn))
                        goto bad;

                lv_COW_table[id].pv_org_number = cpu_to_le64(pvn);
                lv_COW_table[id].pv_org_rsector =
                    cpu_to_le64(be->rsector_org);

                if (_pv_get_number(vg, be->rdev_new, &pvn))
                        goto bad;

                lv_COW_table[id].pv_snap_number = cpu_to_le64(pvn);
                lv_COW_table[id].pv_snap_rsector =
                    cpu_to_le64(be->rsector_new);
        }

        return 0;

      bad:
        printk(KERN_ERR "%s -- lvm_snapshot_fill_COW_page failed",
               lvm_name);
        return -1;
}

/*
 * writes a COW exception table sector to disk (HM)
 *
 * We need to hold a write lock on lv_snap->lv_lock.
 */
int lvm_write_COW_table_block(vg_t * vg, lv_t * lv_snap)
{
        int r;
        const char *err;
        if ((r = _write_COW_table_block(vg, lv_snap,
                                        lv_snap->lv_remap_ptr - 1, &err)))
                lvm_drop_snapshot(vg, lv_snap, err);
        return r;
}

/*
 * copy on write handler for one snapshot logical volume
 *
 * read the original blocks and store them on the new one(s).
 * if there is no exception storage space left --> release the snapshot.
 *
 * this routine gets called for each _first_ write to a physical chunk.
 *
 * We need to hold a write lock on lv_snap->lv_lock.  It is assumed that
 * lv->lv_block_exception is non-NULL (checked by lvm_snapshot_remap_block())
 * when this function is called.
 */
int lvm_snapshot_COW(kdev_t org_phys_dev,
                     unsigned long org_phys_sector,
                     unsigned long org_pe_start,
                     unsigned long org_virt_sector,
                     vg_t * vg, lv_t * lv_snap)
{
        const char *reason;
        unsigned long org_start, snap_start, snap_phys_dev, virt_start,
            pe_off;
        unsigned long phys_start;
        int idx = lv_snap->lv_remap_ptr, chunk_size =
            lv_snap->lv_chunk_size;
        struct kiobuf *iobuf = lv_snap->lv_iobuf;
        unsigned long *blocks = iobuf->blocks;
        int blksize_snap, blksize_org, min_blksize, max_blksize;
        int max_sectors, nr_sectors;

        /* check if we are out of snapshot space */
        if (idx >= lv_snap->lv_remap_end)
                goto fail_out_of_space;

        /* calculate physical boundaries of source chunk */
        pe_off = org_pe_start % chunk_size;
        org_start =
            org_phys_sector - ((org_phys_sector - pe_off) % chunk_size);
        virt_start = org_virt_sector - (org_phys_sector - org_start);

        /* calculate physical boundaries of destination chunk */
        snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
        snap_start = lv_snap->lv_block_exception[idx].rsector_new;

#ifdef DEBUG_SNAPSHOT
        printk(KERN_INFO
               "%s -- COW: "
               "org %s faulting %lu start %lu, snap %s start %lu, "
               "size %d, pe_start %lu pe_off %lu, virt_sec %lu\n",
               lvm_name,
               kdevname(org_phys_dev), org_phys_sector, org_start,
               kdevname(snap_phys_dev), snap_start,
               chunk_size, org_pe_start, pe_off, org_virt_sector);
#endif

        blksize_org = lvm_sectsize(org_phys_dev);
        blksize_snap = lvm_sectsize(snap_phys_dev);
        max_blksize = max(blksize_org, blksize_snap);
        min_blksize = min(blksize_org, blksize_snap);
        max_sectors = KIO_MAX_SECTORS * (min_blksize >> 9);

        if (chunk_size % (max_blksize >> 9))
                goto fail_blksize;

        /* Don't change org_start, we need it to fill in the exception table */
        phys_start = org_start;

        while (chunk_size) {
                nr_sectors = min(chunk_size, max_sectors);
                chunk_size -= nr_sectors;

                iobuf->length = nr_sectors << 9;

                if (!lvm_snapshot_prepare_blocks(blocks, phys_start,
                                                 nr_sectors, blksize_org))
                        goto fail_prepare;

                if (__brw_kiovec(READ, 1, &iobuf, org_phys_dev, blocks,
                                 blksize_org,
                                 lv_snap) != (nr_sectors << 9))
                        goto fail_raw_read;

                if (!lvm_snapshot_prepare_blocks(blocks, snap_start,
                                                 nr_sectors, blksize_snap))
                        goto fail_prepare;

                if (__brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev, blocks,
                                 blksize_snap,
                                 lv_snap) != (nr_sectors << 9))
                        goto fail_raw_write;

                phys_start += nr_sectors;
                snap_start += nr_sectors;
        }

#ifdef DEBUG_SNAPSHOT
        /* invalidate the logical snapshot buffer cache */
        invalidate_snap_cache(virt_start, lv_snap->lv_chunk_size,
                              lv_snap->lv_dev);
#endif

        /* the original chunk is now stored on the snapshot volume
           so update the exception table */
        lv_snap->lv_block_exception[idx].rdev_org = org_phys_dev;
        lv_snap->lv_block_exception[idx].rsector_org = org_start;

        lvm_hash_link(lv_snap->lv_block_exception + idx,
                      org_phys_dev, org_start, lv_snap);
        lv_snap->lv_remap_ptr = idx + 1;
        if (lv_snap->lv_snapshot_use_rate > 0) {
                if (lv_snap->lv_remap_ptr * 100 / lv_snap->lv_remap_end >=
                    lv_snap->lv_snapshot_use_rate)
                        wake_up_interruptible(&lv_snap->lv_snapshot_wait);
        }
        return 0;

        /* slow path */
      out:
        lvm_drop_snapshot(vg, lv_snap, reason);
        return 1;

      fail_out_of_space:
        reason = "out of space";
        goto out;
      fail_raw_read:
        reason = "read error";
        goto out;
      fail_raw_write:
        reason = "write error";
        goto out;
      fail_blksize:
        reason = "blocksize error";
        goto out;

      fail_prepare:
        reason = "couldn't prepare kiovec blocks "
            "(start probably isn't block aligned)";
        goto out;
}

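/*
 * Back a kiobuf with enough locked pages for `sectors' sectors of I/O.
 * Returns 0 on success or a negative errno.
 */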
int lvm_snapshot_alloc_iobuf_pages(struct kiobuf *iobuf, int sectors)
{
        int bytes, nr_pages, err, i;

        bytes = sectors * SECTOR_SIZE;
        nr_pages = (bytes + ~PAGE_MASK) >> PAGE_SHIFT;
        err = expand_kiobuf(iobuf, nr_pages);
        if (err)
                goto out;

        err = -ENOMEM;
        iobuf->locked = 1;
        iobuf->nr_pages = 0;
        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                page = alloc_page(GFP_KERNEL);
                if (!page)
                        goto out;

                iobuf->maplist[i] = page;
                LockPage(page);
                iobuf->nr_pages++;
        }
        iobuf->offset = 0;

        err = 0;

      out:
        return err;
}

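/* cap the hash table at roughly 2% of physical memory */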
static int calc_max_buckets(void)
{
        unsigned long mem;

        mem = num_physpages << PAGE_SHIFT;
        mem /= 100;
        mem *= 2;
        mem /= sizeof(struct list_head);

        return mem;
}

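/*
 * Allocate the exception hash table: one bucket per possible remap,
 * capped by calc_max_buckets() and rounded down to a power of two so
 * lookups can mask with lv_snapshot_hash_mask.
 */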
int lvm_snapshot_alloc_hash_table(lv_t * lv)
{
        int err;
        unsigned long buckets, max_buckets, size;
        struct list_head *hash;

        buckets = lv->lv_remap_end;
        max_buckets = calc_max_buckets();
        buckets = min(buckets, max_buckets);
        while (buckets & (buckets - 1))
                buckets &= (buckets - 1);

        size = buckets * sizeof(struct list_head);

        err = -ENOMEM;
        hash = vmalloc(size);
        lv->lv_snapshot_hash_table = hash;

        if (!hash)
                goto out;
        lv->lv_snapshot_hash_table_size = size;

        lv->lv_snapshot_hash_mask = buckets - 1;
        while (buckets--)
                INIT_LIST_HEAD(hash + buckets);
        err = 0;
      out:
        return err;
}

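/*
 * Allocate everything a snapshot LV needs: a kiovec for chunk I/O,
 * a second one for COW table I/O, and the exception hash table.
 */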
int lvm_snapshot_alloc(lv_t * lv_snap)
{
        int ret;

        /* allocate kiovec to do chunk io */
        ret = alloc_kiovec(1, &lv_snap->lv_iobuf);
        if (ret)
                goto out;

        ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_iobuf,
                                             KIO_MAX_SECTORS);
        if (ret)
                goto out_free_kiovec;

        /* allocate kiovec to do exception table io */
        ret = alloc_kiovec(1, &lv_snap->lv_COW_table_iobuf);
        if (ret)
                goto out_free_kiovec;

        ret = lvm_snapshot_alloc_iobuf_pages(lv_snap->lv_COW_table_iobuf,
                                             PAGE_SIZE / SECTOR_SIZE);
        if (ret)
                goto out_free_both_kiovecs;

        ret = lvm_snapshot_alloc_hash_table(lv_snap);
        if (ret)
                goto out_free_both_kiovecs;

      out:
        return ret;

      out_free_both_kiovecs:
        unmap_kiobuf(lv_snap->lv_COW_table_iobuf);
        free_kiovec(1, &lv_snap->lv_COW_table_iobuf);
        lv_snap->lv_COW_table_iobuf = NULL;

      out_free_kiovec:
        unmap_kiobuf(lv_snap->lv_iobuf);
        free_kiovec(1, &lv_snap->lv_iobuf);
        lv_snap->lv_iobuf = NULL;
        vfree(lv_snap->lv_snapshot_hash_table);
        lv_snap->lv_snapshot_hash_table = NULL;
        goto out;
}

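/* free the exception table, the hash table and both kiobufs of a snapshot */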
void lvm_snapshot_release(lv_t * lv)
{
        if (lv->lv_block_exception) {
                vfree(lv->lv_block_exception);
                lv->lv_block_exception = NULL;
        }
        if (lv->lv_snapshot_hash_table) {
                vfree(lv->lv_snapshot_hash_table);
                lv->lv_snapshot_hash_table = NULL;
                lv->lv_snapshot_hash_table_size = 0;
        }
        if (lv->lv_iobuf) {
                kiobuf_wait_for_io(lv->lv_iobuf);
                unmap_kiobuf(lv->lv_iobuf);
                free_kiovec(1, &lv->lv_iobuf);
                lv->lv_iobuf = NULL;
        }
        if (lv->lv_COW_table_iobuf) {
                kiobuf_wait_for_io(lv->lv_COW_table_iobuf);
                unmap_kiobuf(lv->lv_COW_table_iobuf);
                free_kiovec(1, &lv->lv_COW_table_iobuf);
                lv->lv_COW_table_iobuf = NULL;
        }
}

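/*
 * Write the on-disk COW table block that holds exception `idx' to the
 * snapshot volume, and zero the next block when this one is full.  On
 * failure *reason is set for the caller and 1 is returned.
 */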
static int _write_COW_table_block(vg_t * vg, lv_t * lv_snap,
                                  int idx, const char **reason)
{
        int blksize_snap;
        int end_of_table;
        int idx_COW_table;
        uint pvn;
        ulong snap_pe_start, COW_table_sector_offset,
            COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
        ulong blocks[1];
        kdev_t snap_phys_dev;
        lv_block_exception_t *be;
        struct kiobuf *COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
        lv_COW_table_disk_t *lv_COW_table = (lv_COW_table_disk_t *)
            page_address(lv_snap->lv_COW_table_iobuf->maplist[0]);

        COW_chunks_per_pe = LVM_GET_COW_TABLE_CHUNKS_PER_PE(vg, lv_snap);
        COW_entries_per_pe = LVM_GET_COW_TABLE_ENTRIES_PER_PE(vg, lv_snap);

        /* get physical address of destination chunk */
        snap_phys_dev = lv_snap->lv_block_exception[idx].rdev_new;
        snap_pe_start =
            lv_snap->lv_block_exception[idx -
                                        (idx %
                                         COW_entries_per_pe)].rsector_new -
            lv_snap->lv_chunk_size;

        blksize_snap = lvm_sectsize(snap_phys_dev);

        COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t);
        idx_COW_table = idx % COW_entries_per_pe % COW_entries_per_block;

        if (idx_COW_table == 0)
                memset(lv_COW_table, 0, blksize_snap);

        /* sector offset into the on disk COW table */
        COW_table_sector_offset =
            (idx % COW_entries_per_pe) / (SECTOR_SIZE /
                                          sizeof(lv_COW_table_disk_t));

        /* COW table block to write next */
        blocks[0] =
            (snap_pe_start +
             COW_table_sector_offset) >> (blksize_snap >> 10);

        /* store new COW_table entry */
        be = lv_snap->lv_block_exception + idx;
        if (_pv_get_number(vg, be->rdev_org, &pvn))
                goto fail_pv_get_number;

        lv_COW_table[idx_COW_table].pv_org_number = cpu_to_le64(pvn);
        lv_COW_table[idx_COW_table].pv_org_rsector =
            cpu_to_le64(be->rsector_org);
        if (_pv_get_number(vg, snap_phys_dev, &pvn))
                goto fail_pv_get_number;

        lv_COW_table[idx_COW_table].pv_snap_number = cpu_to_le64(pvn);
        lv_COW_table[idx_COW_table].pv_snap_rsector =
            cpu_to_le64(be->rsector_new);

        COW_table_iobuf->length = blksize_snap;
        /* COW_table_iobuf->nr_pages = 1; */

        if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
                         blocks, blksize_snap, lv_snap) != blksize_snap)
                goto fail_raw_write;

        /* initialization of next COW exception table block with zeroes */
        end_of_table = idx % COW_entries_per_pe == COW_entries_per_pe - 1;
        if (idx_COW_table % COW_entries_per_block ==
            COW_entries_per_block - 1 || end_of_table) {
                /* don't go beyond the end */
                if (idx + 1 >= lv_snap->lv_remap_end)
                        goto out;

                memset(lv_COW_table, 0, blksize_snap);

                if (end_of_table) {
                        idx++;
                        snap_phys_dev =
                            lv_snap->lv_block_exception[idx].rdev_new;
                        snap_pe_start =
                            lv_snap->lv_block_exception[idx -
                                                        (idx %
                                                         COW_entries_per_pe)].
                            rsector_new - lv_snap->lv_chunk_size;
                        blksize_snap = lvm_sectsize(snap_phys_dev);
                        blocks[0] = snap_pe_start >> (blksize_snap >> 10);
                } else
                        blocks[0]++;

                if (__brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
                                 blocks, blksize_snap, lv_snap) !=
                    blksize_snap)
                        goto fail_raw_write;
        }

      out:
        return 0;

      fail_raw_write:
        *reason = "write error";
        return 1;

      fail_pv_get_number:
        *reason = "_pv_get_number failed";
        return 1;
}

/*
 * FIXME_1.2
 * This function is a bit of a hack; we need to ensure that the
 * snapshot is never made active again, because it will surely be
 * corrupt.  At the moment we do not have access to the LVM metadata
 * from within the kernel.  So we set the first exception to point to
 * sector 1 (which will always be within the metadata, and as such
 * invalid).  User land tools will check for this when they are asked
 * to activate the snapshot and prevent this from happening.
 */

static void _disable_snapshot(vg_t * vg, lv_t * lv)
{
        const char *err;
        lv->lv_block_exception[0].rsector_org =
            LVM_SNAPSHOT_DROPPED_SECTOR;
        if (_write_COW_table_block(vg, lv, 0, &err) < 0) {
                printk(KERN_ERR "%s -- couldn't disable snapshot: %s\n",
                       lvm_name, err);
        }
}