/[linux-patches]/genpatches-2.6/historical/2.6.10/4305_dm-multipath.patch
Gentoo

Contents of /genpatches-2.6/historical/2.6.10/4305_dm-multipath.patch

Parent Directory | Revision Log


Revision 2 - (show annotations) (download) (as text)
Sat Jun 11 23:16:54 2005 UTC (15 years, 3 months ago) by dsd
File MIME type: text/x-diff
File size: 33546 byte(s)
Import historical releases
1 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/dm-bio-record.h linux-dsd/drivers/md/dm-bio-record.h
2 --- linux-2.6.10-gentoo/drivers/md/dm-bio-record.h 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-dsd/drivers/md/dm-bio-record.h 2004-12-25 04:28:05.545016880 +0000
4 @@ -0,0 +1,45 @@
5 +/*
6 + * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
7 + *
8 + * This file is released under the GPL.
9 + */
10 +
11 +#ifndef DM_BIO_RECORD_H
12 +#define DM_BIO_RECORD_H
13 +
14 +#include <linux/bio.h>
15 +
16 +/*
17 + * There are lots of mutable fields in the bio struct that get
18 + * changed by the lower levels of the block layer. Some targets,
19 + * such as multipath, may wish to resubmit a bio on error. The
20 + * functions in this file help the target record and restore the
21 + * original bio state.
22 + */
23 +struct dm_bio_details {
24 + sector_t bi_sector;
25 + struct block_device *bi_bdev;
26 + unsigned int bi_size;
27 + unsigned short bi_idx;
28 + unsigned long bi_flags;
29 +};
30 +
31 +static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
32 +{
33 + bd->bi_sector = bio->bi_sector;
34 + bd->bi_bdev = bio->bi_bdev;
35 + bd->bi_size = bio->bi_size;
36 + bd->bi_idx = bio->bi_idx;
37 + bd->bi_flags = bio->bi_flags;
38 +}
39 +
40 +static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
41 +{
42 + bio->bi_sector = bd->bi_sector;
43 + bio->bi_bdev = bd->bi_bdev;
44 + bio->bi_size = bd->bi_size;
45 + bio->bi_idx = bd->bi_idx;
46 + bio->bi_flags = bd->bi_flags;
47 +}
48 +
49 +#endif
50 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/dm.c linux-dsd/drivers/md/dm.c
51 --- linux-2.6.10-gentoo/drivers/md/dm.c 2004-12-25 04:12:32.644839264 +0000
52 +++ linux-dsd/drivers/md/dm.c 2004-12-25 04:28:05.544017032 +0000
53 @@ -43,6 +43,13 @@ struct target_io {
54 union map_info info;
55 };
56
57 +union map_info *dm_get_mapinfo(struct bio *bio)
58 +{
59 + if (bio && bio->bi_private)
60 + return &((struct target_io *)bio->bi_private)->info;
61 + return NULL;
62 +}
63 +
64 /*
65 * Bits for the md->flags field.
66 */
67 @@ -1159,6 +1166,8 @@ static struct block_device_operations dm
68 .owner = THIS_MODULE
69 };
70
71 +EXPORT_SYMBOL(dm_get_mapinfo);
72 +
73 /*
74 * module hooks
75 */
76 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/dm.h linux-dsd/drivers/md/dm.h
77 --- linux-2.6.10-gentoo/drivers/md/dm.h 2004-12-25 04:12:32.646838960 +0000
78 +++ linux-dsd/drivers/md/dm.h 2004-12-25 04:28:05.545016880 +0000
79 @@ -187,5 +187,6 @@ int dm_stripe_init(void);
80 void dm_stripe_exit(void);
81
82 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
83 +union map_info *dm_get_mapinfo(struct bio *bio);
84
85 #endif
86 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/dm-mpath.c linux-dsd/drivers/md/dm-mpath.c
87 --- linux-2.6.10-gentoo/drivers/md/dm-mpath.c 1970-01-01 01:00:00.000000000 +0100
88 +++ linux-dsd/drivers/md/dm-mpath.c 2004-12-25 04:28:05.546016728 +0000
89 @@ -0,0 +1,1008 @@
90 +/*
91 + * Copyright (C) 2003 Sistina Software Limited.
92 + * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
93 + *
94 + * This file is released under the GPL.
95 + */
96 +
97 +#include "dm.h"
98 +#include "dm-path-selector.h"
99 +#include "dm-bio-list.h"
100 +#include "dm-bio-record.h"
101 +
102 +#include <linux/ctype.h>
103 +#include <linux/init.h>
104 +#include <linux/mempool.h>
105 +#include <linux/module.h>
106 +#include <linux/pagemap.h>
107 +#include <linux/slab.h>
108 +#include <linux/time.h>
109 +#include <linux/workqueue.h>
110 +#include <asm/atomic.h>
111 +
112 +#define MESG_STR(x) x, sizeof(x)
113 +
114 +/* Path properties */
115 +struct pgpath {
116 + struct list_head list;
117 +
118 + struct priority_group *pg;
119 +
120 + spinlock_t queued_lock;
121 + unsigned fail_count; /* Cumulative */
122 +
123 + struct path path;
124 +};
125 +
126 +#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
127 +
128 +struct priority_group {
129 + struct list_head list;
130 +
131 + struct multipath *m;
132 + struct path_selector ps;
133 +
134 + unsigned nr_paths;
135 + unsigned bypassed; /* Temporarily bypass this PG */
136 + struct list_head pgpaths;
137 +};
138 +
139 +/* Multipath context */
140 +struct multipath {
141 + struct list_head list;
142 + struct dm_target *ti;
143 +
144 + unsigned nr_priority_groups;
145 + struct list_head priority_groups;
146 + unsigned queue_io; /* Must we queue all I/O? */
147 + unsigned queue_if_no_path; /* Queue I/O if last path fails? */
148 +
149 + spinlock_t lock;
150 + unsigned nr_valid_paths;
151 +
152 + struct pgpath *current_pgpath;
153 + struct priority_group *current_pg;
154 + unsigned current_count;
155 +
156 + struct work_struct process_queued_ios;
157 + struct bio_list queued_ios;
158 + unsigned queue_size;
159 +
160 + struct work_struct trigger_event;
161 +
162 + /*
163 + * We must use a mempool of mpath_io structs so that we
164 + * can resubmit bios on error.
165 + */
166 + mempool_t *mpio_pool;
167 +};
168 +
169 +struct mpath_io {
170 + struct pgpath *pgpath;
171 + struct dm_bio_details details;
172 +};
173 +
174 +typedef int (*action_fn) (struct pgpath *pgpath);
175 +
176 +#define MIN_IOS 256
177 +
178 +static kmem_cache_t *_mpio_cache;
179 +
180 +static void process_queued_ios(void *data);
181 +static void trigger_event(void *data);
182 +
183 +static struct pgpath *alloc_pgpath(void)
184 +{
185 + struct pgpath *pgpath = kmalloc(sizeof(*pgpath), GFP_KERNEL);
186 +
187 + if (pgpath) {
188 + memset(pgpath, 0, sizeof(*pgpath));
189 + pgpath->queued_lock = SPIN_LOCK_UNLOCKED;
190 + pgpath->path.is_active = 1;
191 + }
192 +
193 + return pgpath;
194 +}
195 +
196 +static inline void free_pgpath(struct pgpath *pgpath)
197 +{
198 + kfree(pgpath);
199 +}
200 +
201 +static struct priority_group *alloc_priority_group(void)
202 +{
203 + struct priority_group *pg;
204 +
205 + pg = kmalloc(sizeof(*pg), GFP_KERNEL);
206 + if (!pg)
207 + return NULL;
208 +
209 + memset(pg, 0, sizeof(*pg));
210 + INIT_LIST_HEAD(&pg->pgpaths);
211 +
212 + return pg;
213 +}
214 +
215 +static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
216 +{
217 + struct pgpath *pgpath, *tmp;
218 +
219 + list_for_each_entry_safe (pgpath, tmp, pgpaths, list) {
220 + list_del(&pgpath->list);
221 + dm_put_device(ti, pgpath->path.dev);
222 + free_pgpath(pgpath);
223 + }
224 +}
225 +
226 +static void free_priority_group(struct priority_group *pg,
227 + struct dm_target *ti)
228 +{
229 + struct path_selector *ps = &pg->ps;
230 +
231 + if (ps->type) {
232 + ps->type->dtr(ps);
233 + dm_put_path_selector(ps->type);
234 + }
235 +
236 + free_pgpaths(&pg->pgpaths, ti);
237 + kfree(pg);
238 +}
239 +
240 +static struct multipath *alloc_multipath(void)
241 +{
242 + struct multipath *m;
243 +
244 + m = kmalloc(sizeof(*m), GFP_KERNEL);
245 + if (m) {
246 + memset(m, 0, sizeof(*m));
247 + INIT_LIST_HEAD(&m->priority_groups);
248 + m->lock = SPIN_LOCK_UNLOCKED;
249 + m->queue_io = 1;
250 + INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
251 + INIT_WORK(&m->trigger_event, trigger_event, m);
252 + m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
253 + mempool_free_slab, _mpio_cache);
254 + if (!m->mpio_pool) {
255 + kfree(m);
256 + return NULL;
257 + }
258 + }
259 +
260 + return m;
261 +}
262 +
263 +static void free_multipath(struct multipath *m)
264 +{
265 + struct priority_group *pg, *tmp;
266 +
267 + list_for_each_entry_safe (pg, tmp, &m->priority_groups, list) {
268 + list_del(&pg->list);
269 + free_priority_group(pg, m->ti);
270 + }
271 +
272 + mempool_destroy(m->mpio_pool);
273 + kfree(m);
274 +}
275 +
276 +static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
277 +{
278 + m->current_pg = pgpath->pg;
279 +
280 + m->queue_io = 0;
281 +}
282 +
283 +static void __choose_pgpath(struct multipath *m)
284 +{
285 + struct priority_group *pg;
286 + struct pgpath *pgpath = NULL;
287 + struct path *path;
288 + unsigned bypassed = 1;
289 +
290 + if (!m->nr_valid_paths)
291 + goto out;
292 +
293 + /*
294 + * Loop through priority groups until we find a valid path.
295 + * First time we skip PGs marked 'bypassed'.
296 + * Second time we only try the ones we skipped.
297 + */
298 + do {
299 + list_for_each_entry (pg, &m->priority_groups, list) {
300 + if (pg->bypassed == bypassed)
301 + continue;
302 + path = pg->ps.type->select_path(&pg->ps,
303 + &m->current_count);
304 + if (!path)
305 + continue;
306 +
307 + pgpath = path_to_pgpath(path);
308 +
309 + if (m->current_pg != pg)
310 + __switch_pg(m, pgpath);
311 +
312 + goto out;
313 + }
314 + } while (bypassed--);
315 +
316 +out:
317 + m->current_pgpath = pgpath;
318 +}
319 +
320 +static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
321 + unsigned was_queued)
322 +{
323 + unsigned long flags;
324 + struct pgpath *pgpath;
325 + unsigned must_queue = 0;
326 +
327 + spin_lock_irqsave(&m->lock, flags);
328 +
329 + /* Do we need to select a new pgpath? */
330 + if (!m->current_pgpath ||
331 + (!m->queue_io && (m->current_count && --m->current_count == 0)))
332 + __choose_pgpath(m);
333 +
334 + pgpath = m->current_pgpath;
335 +
336 + if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
337 + must_queue = 1;
338 +
339 + if (was_queued)
340 + m->queue_size--;
341 +
342 + if (must_queue) {
343 + /* queue for the daemon to resubmit */
344 + bio_list_add(&m->queued_ios, bio);
345 + m->queue_size++;
346 + if (!m->queue_io)
347 + schedule_work(&m->process_queued_ios);
348 + }
349 +
350 + spin_unlock_irqrestore(&m->lock, flags);
351 +
352 + if (must_queue)
353 + return 0; /* Queued */
354 +
355 + if (!pgpath)
356 + return -EIO;
357 +
358 + mpio->pgpath = pgpath;
359 + bio->bi_bdev = mpio->pgpath->path.dev->bdev;
360 + return 1; /* Mapped successfully */
361 +}
362 +
363 +static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path)
364 +{
365 + unsigned long flags;
366 +
367 + spin_lock_irqsave(&m->lock, flags);
368 + m->queue_if_no_path = queue_if_no_path;
369 + if (!m->queue_if_no_path)
370 + schedule_work(&m->process_queued_ios);
371 + spin_unlock_irqrestore(&m->lock, flags);
372 +
373 + return 0;
374 +}
375 +
376 +/*-----------------------------------------------------------------
377 + * The multipath daemon is responsible for resubmitting queued ios.
378 + *---------------------------------------------------------------*/
379 +
380 +static void dispatch_queued_ios(struct multipath *m)
381 +{
382 + int r;
383 + unsigned long flags;
384 + struct bio *bio = NULL, *next;
385 + struct mpath_io *mpio;
386 + union map_info *info;
387 +
388 + spin_lock_irqsave(&m->lock, flags);
389 + bio = bio_list_get(&m->queued_ios);
390 + spin_unlock_irqrestore(&m->lock, flags);
391 +
392 + while (bio) {
393 + next = bio->bi_next;
394 + bio->bi_next = NULL;
395 +
396 + info = dm_get_mapinfo(bio);
397 + mpio = info->ptr;
398 +
399 + r = map_io(m, bio, mpio, 1);
400 + if (r < 0)
401 + bio_endio(bio, bio->bi_size, r);
402 + else if (r == 1)
403 + generic_make_request(bio);
404 +
405 + bio = next;
406 + }
407 +}
408 +
409 +static void process_queued_ios(void *data)
410 +{
411 + struct multipath *m = (struct multipath *) data;
412 + struct pgpath *pgpath;
413 + unsigned must_queue = 0;
414 + unsigned long flags;
415 +
416 + spin_lock_irqsave(&m->lock, flags);
417 +
418 + if (!m->current_pgpath)
419 + __choose_pgpath(m);
420 +
421 + pgpath = m->current_pgpath;
422 +
423 + if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
424 + must_queue = 1;
425 +
426 + spin_unlock_irqrestore(&m->lock, flags);
427 +
428 + if (!must_queue)
429 + dispatch_queued_ios(m);
430 +}
431 +
432 +/*
433 + * An event is triggered whenever a path is taken out of use.
434 + * Includes path failure and PG bypass.
435 + */
436 +static void trigger_event(void *data)
437 +{
438 + struct multipath *m = (struct multipath *) data;
439 +
440 + dm_table_event(m->ti->table);
441 +}
442 +
443 +/*-----------------------------------------------------------------
444 + * Constructor/argument parsing:
445 + * <num multipath feature args> [<arg>]*
446 + * <num priority groups> [<selector> <num paths> <num selector args>
447 + * [<path> [<arg>]* ]+ ]+
448 + *---------------------------------------------------------------*/
449 +struct param {
450 + unsigned min;
451 + unsigned max;
452 + char *error;
453 +};
454 +
455 +#define ESTR(s) ("dm-multipath: " s)
456 +
457 +static int read_param(struct param *param, char *str, unsigned *v, char **error)
458 +{
459 + if (!str ||
460 + (sscanf(str, "%u", v) != 1) ||
461 + (*v < param->min) ||
462 + (*v > param->max)) {
463 + *error = param->error;
464 + return -EINVAL;
465 + }
466 +
467 + return 0;
468 +}
469 +
470 +struct arg_set {
471 + unsigned argc;
472 + char **argv;
473 +};
474 +
475 +static char *shift(struct arg_set *as)
476 +{
477 + char *r;
478 +
479 + if (as->argc) {
480 + as->argc--;
481 + r = *as->argv;
482 + as->argv++;
483 + return r;
484 + }
485 +
486 + return NULL;
487 +}
488 +
489 +static void consume(struct arg_set *as, unsigned n)
490 +{
491 + BUG_ON (as->argc < n);
492 + as->argc -= n;
493 + as->argv += n;
494 +}
495 +
496 +static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
497 + struct dm_target *ti)
498 +{
499 + int r;
500 + struct pgpath *p;
501 +
502 + /* we need at least a path arg */
503 + if (as->argc < 1) {
504 + ti->error = ESTR("no device given");
505 + return NULL;
506 + }
507 +
508 + p = alloc_pgpath();
509 + if (!p)
510 + return NULL;
511 +
512 + r = dm_get_device(ti, shift(as), ti->begin, ti->len,
513 + dm_table_get_mode(ti->table), &p->path.dev);
514 + if (r) {
515 + ti->error = ESTR("error getting device");
516 + goto bad;
517 + }
518 +
519 + r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
520 + if (r) {
521 + dm_put_device(ti, p->path.dev);
522 + goto bad;
523 + }
524 +
525 + return p;
526 +
527 + bad:
528 + free_pgpath(p);
529 + return NULL;
530 +}
531 +
532 +static struct priority_group *parse_priority_group(struct arg_set *as,
533 + struct multipath *m,
534 + struct dm_target *ti)
535 +{
536 + static struct param _params[] = {
537 + {1, 1024, ESTR("invalid number of paths")},
538 + {0, 1024, ESTR("invalid number of selector args")}
539 + };
540 +
541 + int r;
542 + unsigned i, nr_selector_args, nr_params;
543 + struct priority_group *pg;
544 + struct path_selector_type *pst;
545 +
546 + if (as->argc < 2) {
547 + as->argc = 0;
548 + ti->error = ESTR("not enough priority group aruments");
549 + return NULL;
550 + }
551 +
552 + pg = alloc_priority_group();
553 + if (!pg) {
554 + ti->error = ESTR("couldn't allocate priority group");
555 + return NULL;
556 + }
557 + pg->m = m;
558 +
559 + pst = dm_get_path_selector(shift(as));
560 + if (!pst) {
561 + ti->error = ESTR("unknown path selector type");
562 + goto bad;
563 + }
564 +
565 + /* FIXME Read path selector arguments & pass them to ctr */
566 +
567 + r = pst->ctr(&pg->ps);
568 + if (r) {
569 + dm_put_path_selector(pst);
570 + goto bad;
571 + }
572 + pg->ps.type = pst;
573 +
574 + /*
575 + * read the paths
576 + */
577 + r = read_param(_params, shift(as), &pg->nr_paths, &ti->error);
578 + if (r)
579 + goto bad;
580 +
581 + r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
582 + if (r)
583 + goto bad;
584 +
585 + nr_params = 1 + nr_selector_args;
586 + for (i = 0; i < pg->nr_paths; i++) {
587 + struct pgpath *pgpath;
588 + struct arg_set path_args;
589 +
590 + if (as->argc < nr_params)
591 + goto bad;
592 +
593 + path_args.argc = nr_params;
594 + path_args.argv = as->argv;
595 +
596 + pgpath = parse_path(&path_args, &pg->ps, ti);
597 + if (!pgpath)
598 + goto bad;
599 +
600 + pgpath->pg = pg;
601 + list_add_tail(&pgpath->list, &pg->pgpaths);
602 + consume(as, nr_params);
603 + }
604 +
605 + return pg;
606 +
607 + bad:
608 + free_priority_group(pg, ti);
609 + return NULL;
610 +}
611 +
612 +static int parse_features(struct arg_set *as, struct multipath *m,
613 + struct dm_target *ti)
614 +{
615 + int r;
616 + unsigned argc;
617 +
618 + static struct param _params[] = {
619 + {0, 1, ESTR("invalid number of feature args")},
620 + };
621 +
622 + r = read_param(_params, shift(as), &argc, &ti->error);
623 + if (r)
624 + return -EINVAL;
625 +
626 + if (!argc)
627 + return 0;
628 +
629 + if (!strnicmp(shift(as), MESG_STR("queue_if_no_path")))
630 + return queue_if_no_path(m, 1);
631 + else {
632 + ti->error = "Unrecognised multipath feature request";
633 + return -EINVAL;
634 + }
635 +}
636 +
637 +static int multipath_ctr(struct dm_target *ti, unsigned int argc,
638 + char **argv)
639 +{
640 + /* target parameters */
641 + static struct param _params[] = {
642 + {1, 1024, ESTR("invalid number of priority groups")},
643 + };
644 +
645 + int r;
646 + struct multipath *m;
647 + struct arg_set as;
648 +
649 + as.argc = argc;
650 + as.argv = argv;
651 +
652 + m = alloc_multipath();
653 + if (!m) {
654 + ti->error = ESTR("can't allocate multipath");
655 + return -EINVAL;
656 + }
657 +
658 + r = parse_features(&as, m, ti);
659 + if (r)
660 + goto bad;
661 +
662 + r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
663 + if (r)
664 + goto bad;
665 +
666 + /* parse the priority groups */
667 + while (as.argc) {
668 + struct priority_group *pg;
669 + pg = parse_priority_group(&as, m, ti);
670 + if (!pg) {
671 + r = -EINVAL;
672 + goto bad;
673 + }
674 +
675 + m->nr_valid_paths += pg->nr_paths;
676 + list_add_tail(&pg->list, &m->priority_groups);
677 + }
678 +
679 + ti->private = m;
680 + m->ti = ti;
681 +
682 + return 0;
683 +
684 + bad:
685 + free_multipath(m);
686 + return r;
687 +}
688 +
689 +static void multipath_dtr(struct dm_target *ti)
690 +{
691 + struct multipath *m = (struct multipath *) ti->private;
692 + free_multipath(m);
693 +}
694 +
695 +static int multipath_map(struct dm_target *ti, struct bio *bio,
696 + union map_info *map_context)
697 +{
698 + int r;
699 + struct mpath_io *mpio;
700 + struct multipath *m = (struct multipath *) ti->private;
701 +
702 + mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
703 + dm_bio_record(&mpio->details, bio);
704 +
705 + map_context->ptr = mpio;
706 + bio->bi_rw |= (1 << BIO_RW_FAILFAST);
707 + r = map_io(m, bio, mpio, 0);
708 + if (r < 0)
709 + mempool_free(mpio, m->mpio_pool);
710 +
711 + return r;
712 +}
713 +
714 +static int fail_path(struct pgpath *pgpath)
715 +{
716 + unsigned long flags;
717 + struct multipath *m;
718 +
719 + spin_lock_irqsave(&pgpath->queued_lock, flags);
720 +
721 + if (!pgpath->path.is_active)
722 + goto out;
723 +
724 + m = pgpath->pg->m;
725 +
726 + pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
727 + pgpath->path.is_active = 0;
728 + pgpath->fail_count++;
729 +
730 + spin_lock(&m->lock);
731 + m->nr_valid_paths--;
732 +
733 + if (pgpath == m->current_pgpath)
734 + m->current_pgpath = NULL;
735 +
736 + spin_unlock(&m->lock);
737 + schedule_work(&m->trigger_event);
738 +
739 +out:
740 + spin_unlock_irqrestore(&pgpath->queued_lock, flags);
741 +
742 + return 0;
743 +}
744 +
745 +static int reinstate_path(struct pgpath *pgpath)
746 +{
747 + int r = 0;
748 + unsigned long flags;
749 + struct multipath *m;
750 +
751 + spin_lock_irqsave(&pgpath->queued_lock, flags);
752 +
753 + if (pgpath->path.is_active)
754 + goto out;
755 +
756 + if (!pgpath->pg->ps.type) {
757 + DMWARN("Reinstate path not supported by path selector %s",
758 + pgpath->pg->ps.type->name);
759 + r = -EINVAL;
760 + goto out;
761 + }
762 +
763 + r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
764 + if (r)
765 + goto out;
766 +
767 + pgpath->path.is_active = 1;
768 + m = pgpath->pg->m;
769 +
770 + spin_lock(&m->lock);
771 + m->current_pgpath = NULL;
772 + if (!m->nr_valid_paths++)
773 + schedule_work(&m->process_queued_ios);
774 + spin_unlock(&m->lock);
775 +
776 + schedule_work(&m->trigger_event);
777 +
778 +out:
779 + spin_unlock_irqrestore(&pgpath->queued_lock, flags);
780 +
781 + return r;
782 +}
783 +
784 +/*
785 + * Fail or reinstate all matching paths
786 + */
787 +static int action_dev(struct multipath *m, struct dm_dev *dev,
788 + action_fn action)
789 +{
790 + int r = 0;
791 + struct pgpath *pgpath;
792 + struct priority_group *pg;
793 +
794 + list_for_each_entry(pg, &m->priority_groups, list) {
795 + list_for_each_entry(pgpath, &pg->pgpaths, list) {
796 + if (pgpath->path.dev == dev)
797 + r = action(pgpath);
798 + }
799 + }
800 +
801 + return r;
802 +}
803 +
804 +static void bypass_pg(struct multipath *m, struct priority_group *pg,
805 + int bypassed)
806 +{
807 + unsigned long flags;
808 +
809 + spin_lock_irqsave(&m->lock, flags);
810 + pg->bypassed = bypassed;
811 + m->current_pgpath = NULL;
812 + m->current_pg = NULL;
813 + spin_unlock_irqrestore(&m->lock, flags);
814 +
815 + schedule_work(&m->trigger_event);
816 +}
817 +
818 +/*
819 + * Set/clear bypassed status of a PG.
820 + * PG numbering goes 1, 2, 3...
821 + */
822 +static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
823 +{
824 + struct priority_group *pg;
825 + unsigned pgnum;
826 +
827 + if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum)
828 + goto error;
829 +
830 + list_for_each_entry(pg, &m->priority_groups, list) {
831 + if (--pgnum)
832 + continue;
833 +
834 + bypass_pg(m, pg, bypassed);
835 +
836 + return 0;
837 + }
838 +
839 +error:
840 + DMWARN("invalid PG number supplied to bypass_pg");
841 + return -EINVAL;
842 +}
843 +
844 +static int do_end_io(struct multipath *m, struct bio *bio,
845 + int error, struct mpath_io *mpio)
846 +{
847 + unsigned err_flags = MP_FAIL_PATH; /* Default behavior */
848 +
849 + if (error) {
850 + spin_lock(&m->lock);
851 + if (!m->nr_valid_paths) {
852 + spin_unlock(&m->lock);
853 + return -EIO;
854 + }
855 + spin_unlock(&m->lock);
856 +
857 + if (err_flags & MP_FAIL_PATH)
858 + fail_path(mpio->pgpath);
859 +
860 + if (err_flags & MP_BYPASS_PG)
861 + bypass_pg(m, mpio->pgpath->pg, 1);
862 +
863 + if (err_flags & MP_ERROR_IO)
864 + return -EIO;
865 +
866 + dm_bio_restore(&mpio->details, bio);
867 +
868 + /* queue for the daemon to resubmit or fail */
869 + spin_lock(&m->lock);
870 + bio_list_add(&m->queued_ios, bio);
871 + m->queue_size++;
872 + if (!m->queue_io)
873 + schedule_work(&m->process_queued_ios);
874 + spin_unlock(&m->lock);
875 +
876 + return 1; /* io not complete */
877 + }
878 +
879 + return 0;
880 +}
881 +
882 +static int multipath_end_io(struct dm_target *ti, struct bio *bio,
883 + int error, union map_info *map_context)
884 +{
885 + struct multipath *m = (struct multipath *) ti->private;
886 + struct mpath_io *mpio = (struct mpath_io *) map_context->ptr;
887 + int r;
888 +
889 + r = do_end_io(m, bio, error, mpio);
890 + if (r <= 0)
891 + mempool_free(mpio, m->mpio_pool);
892 +
893 + return r;
894 +}
895 +
896 +static void multipath_suspend(struct dm_target *ti)
897 +{
898 + struct multipath *m = (struct multipath *) ti->private;
899 +
900 +
901 +}
902 +
903 +static void multipath_resume(struct dm_target *ti)
904 +{
905 + struct multipath *m = (struct multipath *) ti->private;
906 +
907 +
908 +}
909 +
910 +/*
911 + * Info string has the following format:
912 + * num_multipath_feature_args [multipath_feature_args]*
913 + * num_groups [A|D|E num_paths num_selector_args [path_dev A|F fail_count [selector_args]* ]+ ]+
914 + *
915 + * Table string has the following format (identical to the constructor string):
916 + * num_feature_args [features_args]*
917 + * num_groups [priority selector-name num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
918 + */
919 +static int multipath_status(struct dm_target *ti, status_type_t type,
920 + char *result, unsigned int maxlen)
921 +{
922 + int sz = 0;
923 + unsigned long flags;
924 + struct multipath *m = (struct multipath *) ti->private;
925 + struct priority_group *pg;
926 + struct pgpath *p;
927 + char buffer[32];
928 + char state;
929 +
930 + /* Features */
931 + spin_lock_irqsave(&m->lock, flags);
932 + if (type == STATUSTYPE_INFO)
933 + DMEMIT("1 %u ", m->queue_size);
934 + else if (m->queue_if_no_path)
935 + DMEMIT("1 queue_if_no_path ");
936 + else
937 + DMEMIT("0 ");
938 + spin_unlock_irqrestore(&m->lock, flags);
939 +
940 + DMEMIT("%u ", m->nr_priority_groups);
941 +
942 + switch (type) {
943 + case STATUSTYPE_INFO:
944 +
945 + list_for_each_entry(pg, &m->priority_groups, list) {
946 + if (pg->bypassed)
947 + state = 'D'; /* Disabled */
948 + else if (pg == m->current_pg)
949 + state = 'A'; /* Currently Active */
950 + else
951 + state = 'E'; /* Enabled */
952 +
953 + DMEMIT("%c %u %u ", state, pg->nr_paths,
954 + pg->ps.type->info_args);
955 +
956 + list_for_each_entry(p, &pg->pgpaths, list) {
957 + format_dev_t(buffer, p->path.dev->bdev->bd_dev);
958 + spin_lock_irqsave(&p->queued_lock, flags);
959 + DMEMIT("%s %s %u ", buffer,
960 + p->path.is_active ? "A" : "F",
961 + p->fail_count);
962 + if (pg->ps.type->status)
963 + sz += pg->ps.type->status(&pg->ps,
964 + &p->path, type, result + sz,
965 + maxlen - sz);
966 + spin_unlock_irqrestore(&p->queued_lock, flags);
967 + }
968 + }
969 + break;
970 +
971 + case STATUSTYPE_TABLE:
972 + list_for_each_entry(pg, &m->priority_groups, list) {
973 + DMEMIT("%s %u %u ", pg->ps.type->name,
974 + pg->nr_paths, pg->ps.type->table_args);
975 +
976 + list_for_each_entry(p, &pg->pgpaths, list) {
977 + format_dev_t(buffer, p->path.dev->bdev->bd_dev);
978 + DMEMIT("%s ", buffer);
979 + if (pg->ps.type->status)
980 + sz += pg->ps.type->status(&pg->ps,
981 + &p->path, type, result + sz,
982 + maxlen - sz);
983 + }
984 + }
985 + break;
986 + }
987 +
988 + return 0;
989 +}
990 +
991 +static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
992 +{
993 + int r;
994 + struct dm_dev *dev;
995 + struct multipath *m = (struct multipath *) ti->private;
996 + action_fn action;
997 +
998 + if (argc == 1) {
999 + if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
1000 + return queue_if_no_path(m, 1);
1001 + else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
1002 + return queue_if_no_path(m, 0);
1003 + }
1004 +
1005 + if (argc != 2)
1006 + goto error;
1007 +
1008 + if (!strnicmp(argv[0], MESG_STR("disable_group")))
1009 + return bypass_pg_num(m, argv[1], 1);
1010 + else if (!strnicmp(argv[0], MESG_STR("enable_group")))
1011 + return bypass_pg_num(m, argv[1], 0);
1012 + else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
1013 + action = reinstate_path;
1014 + else if (!strnicmp(argv[0], MESG_STR("fail_path")))
1015 + action = fail_path;
1016 + else
1017 + goto error;
1018 +
1019 + r = dm_get_device(ti, argv[1], ti->begin, ti->len,
1020 + dm_table_get_mode(ti->table), &dev);
1021 + if (r) {
1022 + DMWARN("dm-multipath message: error getting device %s",
1023 + argv[1]);
1024 + return -EINVAL;
1025 + }
1026 +
1027 + r = action_dev(m, dev, action);
1028 +
1029 + dm_put_device(ti, dev);
1030 +
1031 + return r;
1032 +
1033 +error:
1034 + DMWARN("Unrecognised multipath message received.");
1035 + return -EINVAL;
1036 +}
1037 +
1038 +/*-----------------------------------------------------------------
1039 + * Module setup
1040 + *---------------------------------------------------------------*/
1041 +static struct target_type multipath_target = {
1042 + .name = "multipath",
1043 + .version = {1, 0, 3},
1044 + .module = THIS_MODULE,
1045 + .ctr = multipath_ctr,
1046 + .dtr = multipath_dtr,
1047 + .map = multipath_map,
1048 + .end_io = multipath_end_io,
1049 + .suspend = multipath_suspend,
1050 + .resume = multipath_resume,
1051 + .status = multipath_status,
1052 + .message = multipath_message,
1053 +};
1054 +
1055 +static int __init dm_multipath_init(void)
1056 +{
1057 + int r;
1058 +
1059 + /* allocate a slab for the dm_ios */
1060 + _mpio_cache = kmem_cache_create("dm_mpath", sizeof(struct mpath_io),
1061 + 0, 0, NULL, NULL);
1062 + if (!_mpio_cache)
1063 + return -ENOMEM;
1064 +
1065 + r = dm_register_target(&multipath_target);
1066 + if (r < 0) {
1067 + DMERR("%s: register failed %d", multipath_target.name, r);
1068 + kmem_cache_destroy(_mpio_cache);
1069 + return -EINVAL;
1070 + }
1071 +
1072 + DMINFO("dm-multipath version %u.%u.%u loaded",
1073 + multipath_target.version[0], multipath_target.version[1],
1074 + multipath_target.version[2]);
1075 +
1076 + return r;
1077 +}
1078 +
1079 +static void __exit dm_multipath_exit(void)
1080 +{
1081 + int r;
1082 +
1083 + r = dm_unregister_target(&multipath_target);
1084 + if (r < 0)
1085 + DMERR("%s: target unregister failed %d",
1086 + multipath_target.name, r);
1087 + kmem_cache_destroy(_mpio_cache);
1088 +}
1089 +
1090 +EXPORT_SYMBOL(dm_pg_init_complete);
1091 +
1092 +module_init(dm_multipath_init);
1093 +module_exit(dm_multipath_exit);
1094 +
1095 +MODULE_DESCRIPTION(DM_NAME " multipath target");
1096 +MODULE_AUTHOR("Sistina software <dm@uk.sistina.com>");
1097 +MODULE_LICENSE("GPL");
1098 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/dm-mpath.h linux-dsd/drivers/md/dm-mpath.h
1099 --- linux-2.6.10-gentoo/drivers/md/dm-mpath.h 1970-01-01 01:00:00.000000000 +0100
1100 +++ linux-dsd/drivers/md/dm-mpath.h 2004-12-25 04:28:05.547016576 +0000
1101 @@ -0,0 +1,25 @@
1102 +/*
1103 + * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
1104 + *
1105 + * This file is released under the GPL.
1106 + *
1107 + * Multipath.
1108 + */
1109 +
1110 +#ifndef DM_MPATH_H
1111 +#define DM_MPATH_H
1112 +
1113 +#include <linux/device-mapper.h>
1114 +
1115 +struct path {
1116 + struct dm_dev *dev; /* Read-only */
1117 + unsigned is_active; /* Read-only */
1118 +
1119 + void *pscontext; /* For path-selector use */
1120 + void *hwhcontext; /* For hw-handler use */
1121 +};
1122 +
1123 +/* Callback for hwh_pg_init_fn to use when complete */
1124 +void dm_pg_init_complete(struct path *path, unsigned err_flags);
1125 +
1126 +#endif
1127 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/dm-path-selector.c linux-dsd/drivers/md/dm-path-selector.c
1128 --- linux-2.6.10-gentoo/drivers/md/dm-path-selector.c 1970-01-01 01:00:00.000000000 +0100
1129 +++ linux-dsd/drivers/md/dm-path-selector.c 2004-12-25 04:28:05.547016576 +0000
1130 @@ -0,0 +1,156 @@
1131 +/*
1132 + * Copyright (C) 2003 Sistina Software.
1133 + * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
1134 + *
1135 + * Module Author: Heinz Mauelshagen
1136 + *
1137 + * This file is released under the GPL.
1138 + *
1139 + * Path selector registration.
1140 + */
1141 +
1142 +#include "dm.h"
1143 +#include "dm-path-selector.h"
1144 +
1145 +#include <linux/slab.h>
1146 +
1147 +struct ps_internal {
1148 + struct path_selector_type pst;
1149 +
1150 + struct list_head list;
1151 + long use;
1152 +};
1153 +
1154 +#define pst_to_psi(__pst) container_of((__pst), struct ps_internal, pst)
1155 +
1156 +static LIST_HEAD(_path_selectors);
1157 +static DECLARE_RWSEM(_ps_lock);
1158 +
1159 +struct ps_internal *__find_path_selector_type(const char *name)
1160 +{
1161 + struct ps_internal *psi;
1162 +
1163 + list_for_each_entry (psi, &_path_selectors, list) {
1164 + if (!strcmp(name, psi->pst.name))
1165 + return psi;
1166 + }
1167 +
1168 + return NULL;
1169 +}
1170 +
1171 +static struct ps_internal *get_path_selector(const char *name)
1172 +{
1173 + struct ps_internal *psi;
1174 +
1175 + down_read(&_ps_lock);
1176 + psi = __find_path_selector_type(name);
1177 + if (psi) {
1178 + if ((psi->use == 0) && !try_module_get(psi->pst.module))
1179 + psi = NULL;
1180 + else
1181 + psi->use++;
1182 + }
1183 + up_read(&_ps_lock);
1184 +
1185 + return psi;
1186 +}
1187 +
1188 +struct path_selector_type *dm_get_path_selector(const char *name)
1189 +{
1190 + struct ps_internal *psi;
1191 +
1192 + if (!name)
1193 + return NULL;
1194 +
1195 + psi = get_path_selector(name);
1196 + if (!psi) {
1197 + request_module("dm-%s", name);
1198 + psi = get_path_selector(name);
1199 + }
1200 +
1201 + return psi ? &psi->pst : NULL;
1202 +}
1203 +
1204 +void dm_put_path_selector(struct path_selector_type *pst)
1205 +{
1206 + struct ps_internal *psi;
1207 +
1208 + if (!pst)
1209 + return;
1210 +
1211 + down_read(&_ps_lock);
1212 + psi = __find_path_selector_type(pst->name);
1213 + if (!psi)
1214 + goto out;
1215 +
1216 + if (--psi->use == 0)
1217 + module_put(psi->pst.module);
1218 +
1219 + if (psi->use < 0)
1220 + BUG();
1221 +
1222 +out:
1223 + up_read(&_ps_lock);
1224 +}
1225 +
1226 +static struct ps_internal *_alloc_path_selector(struct path_selector_type *pst)
1227 +{
1228 + struct ps_internal *psi = kmalloc(sizeof(*psi), GFP_KERNEL);
1229 +
1230 + if (psi) {
1231 + memset(psi, 0, sizeof(*psi));
1232 + psi->pst = *pst;
1233 + }
1234 +
1235 + return psi;
1236 +}
1237 +
1238 +int dm_register_path_selector(struct path_selector_type *pst)
1239 +{
1240 + int r = 0;
1241 + struct ps_internal *psi = _alloc_path_selector(pst);
1242 +
1243 + if (!psi)
1244 + return -ENOMEM;
1245 +
1246 + down_write(&_ps_lock);
1247 +
1248 + if (__find_path_selector_type(pst->name)) {
1249 + kfree(psi);
1250 + r = -EEXIST;
1251 + } else
1252 + list_add(&psi->list, &_path_selectors);
1253 +
1254 + up_write(&_ps_lock);
1255 +
1256 + return r;
1257 +}
1258 +
1259 +int dm_unregister_path_selector(struct path_selector_type *pst)
1260 +{
1261 + struct ps_internal *psi;
1262 +
1263 + down_write(&_ps_lock);
1264 +
1265 + psi = __find_path_selector_type(pst->name);
1266 + if (!psi) {
1267 + up_write(&_ps_lock);
1268 + return -EINVAL;
1269 + }
1270 +
1271 + if (psi->use) {
1272 + up_write(&_ps_lock);
1273 + return -ETXTBSY;
1274 + }
1275 +
1276 + list_del(&psi->list);
1277 +
1278 + up_write(&_ps_lock);
1279 +
1280 + kfree(psi);
1281 +
1282 + return 0;
1283 +}
1284 +
1285 +EXPORT_SYMBOL(dm_register_path_selector);
1286 +EXPORT_SYMBOL(dm_unregister_path_selector);
1287 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/dm-path-selector.h linux-dsd/drivers/md/dm-path-selector.h
1288 --- linux-2.6.10-gentoo/drivers/md/dm-path-selector.h 1970-01-01 01:00:00.000000000 +0100
1289 +++ linux-dsd/drivers/md/dm-path-selector.h 2004-12-25 04:28:05.548016424 +0000
1290 @@ -0,0 +1,103 @@
1291 +/*
1292 + * Copyright (C) 2003 Sistina Software.
1293 + * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
1294 + *
1295 + * Module Author: Heinz Mauelshagen
1296 + *
1297 + * This file is released under the GPL.
1298 + *
1299 + * Path-Selector registration.
1300 + */
1301 +
1302 +#ifndef DM_PATH_SELECTOR_H
1303 +#define DM_PATH_SELECTOR_H
1304 +
1305 +#include <linux/device-mapper.h>
1306 +
1307 +#include "dm-mpath.h"
1308 +
1309 +/*
1310 + * We provide an abstraction for the code that chooses which path
1311 + * to send some io down.
1312 + */
1313 +struct path_selector_type;
1314 +struct path_selector {
1315 + struct path_selector_type *type;
1316 + void *context;
1317 +};
1318 +
1319 +/*
1320 + * Constructs a path selector object, takes custom arguments
1321 + */
1322 +typedef int (*ps_ctr_fn) (struct path_selector *ps);
1323 +typedef void (*ps_dtr_fn) (struct path_selector *ps);
1324 +
1325 +/*
1326 + * Add an opaque path object, along with some selector specific
1327 + * path args (eg, path priority).
1328 + */
1329 +typedef int (*ps_add_path_fn) (struct path_selector *ps, struct path *path,
1330 + int argc, char **argv, char **error);
1331 +
1332 +/*
1333 + * Chooses a path for this io, if no paths are available then
1334 + * NULL will be returned.
1335 + *
1336 + * repeat_count is the number of times to use the path before
1337 + * calling the function again. 0 means don't call it again unless
1338 + * the path fails.
1339 + */
1340 +typedef struct path *(*ps_select_path_fn) (struct path_selector *ps,
1341 + unsigned *repeat_count);
1342 +
1343 +/*
1344 + * Notify the selector that a path has failed.
1345 + */
1346 +typedef void (*ps_fail_path_fn) (struct path_selector *ps,
1347 + struct path *p);
1348 +
1349 +/*
1350 + * Ask selector to reinstate a path.
1351 + */
1352 +typedef int (*ps_reinstate_path_fn) (struct path_selector *ps,
1353 + struct path *p);
1354 +
1355 +/*
1356 + * Table content based on parameters added in ps_add_path_fn
1357 + * or path selector status
1358 + */
1359 +typedef int (*ps_status_fn) (struct path_selector *ps,
1360 + struct path *path,
1361 + status_type_t type,
1362 + char *result, unsigned int maxlen);
1363 +
1364 +/* Information about a path selector type */
1365 +struct path_selector_type {
1366 + char *name;
1367 + struct module *module;
1368 +
1369 + unsigned int table_args;
1370 + unsigned int info_args;
1371 + ps_ctr_fn ctr;
1372 + ps_dtr_fn dtr;
1373 +
1374 + ps_add_path_fn add_path;
1375 + ps_fail_path_fn fail_path;
1376 + ps_reinstate_path_fn reinstate_path;
1377 + ps_select_path_fn select_path;
1378 + ps_status_fn status;
1379 +};
1380 +
1381 +/* Register a path selector */
1382 +int dm_register_path_selector(struct path_selector_type *type);
1383 +
1384 +/* Unregister a path selector */
1385 +int dm_unregister_path_selector(struct path_selector_type *type);
1386 +
1387 +/* Returns a registered path selector type */
1388 +struct path_selector_type *dm_get_path_selector(const char *name);
1389 +
1390 +/* Releases a path selector */
1391 +void dm_put_path_selector(struct path_selector_type *pst);
1392 +
1393 +#endif
1394 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/Kconfig linux-dsd/drivers/md/Kconfig
1395 --- linux-2.6.10-gentoo/drivers/md/Kconfig 2004-12-25 04:27:21.925648032 +0000
1396 +++ linux-dsd/drivers/md/Kconfig 2004-12-25 04:28:40.864647480 +0000
1397 @@ -227,6 +227,12 @@ config DM_ZERO
1398 A target that discards writes, and returns all zeroes for
1399 reads. Useful in some recovery situations.
1400
1401 +config DM_MULTIPATH
1402 + tristate "Multipath target (EXPERIMENTAL)"
1403 + depends on BLK_DEV_DM && EXPERIMENTAL
1404 + ---help---
1405 + Allow volume managers to support multipath hardware.
1406 +
1407 config BLK_DEV_DM_BBR
1408 tristate "Bad Block Relocation Device Target (EXPERIMENTAL)"
1409 depends on BLK_DEV_DM && EXPERIMENTAL
1410 diff -urNpX dontdiff linux-2.6.10-gentoo/drivers/md/Makefile linux-dsd/drivers/md/Makefile
1411 --- linux-2.6.10-gentoo/drivers/md/Makefile 2004-12-25 04:27:21.925648032 +0000
1412 +++ linux-dsd/drivers/md/Makefile 2004-12-25 04:28:05.543017184 +0000
1413 @@ -4,6 +4,7 @@
1414
1415 dm-mod-objs := dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
1416 dm-ioctl.o dm-io.o kcopyd.o
1417 +dm-multipath-objs := dm-path-selector.o dm-mpath.o
1418 dm-snapshot-objs := dm-snap.o dm-exception-store.o
1419 dm-mirror-objs := dm-log.o dm-raid1.o
1420 raid6-objs := raid6main.o raid6algos.o raid6recov.o raid6tables.o \
1421 @@ -28,6 +29,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
1422 obj-$(CONFIG_BLK_DEV_MD) += md.o
1423 obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
1424 obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
1425 +obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o
1426 obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
1427 obj-$(CONFIG_DM_MIRROR) += dm-mirror.o
1428 obj-$(CONFIG_DM_ZERO) += dm-zero.o

  ViewVC Help
Powered by ViewVC 1.1.20